From 51fa96660ee166c90f8d6bde2e46f2ae272fa685 Mon Sep 17 00:00:00 2001 From: Adam Sawicki Date: Wed, 3 Oct 2018 13:44:29 +0200 Subject: [PATCH 01/10] Wrote basic test for sparse binding. --- src/SparseBindingTest.cpp | 248 ++++++++++++++++++++++++++++++++++++++ src/SparseBindingTest.h | 7 ++ src/Tests.cpp | 4 +- src/VulkanSample.cpp | 86 ++++++++++--- 4 files changed, 328 insertions(+), 17 deletions(-) create mode 100644 src/SparseBindingTest.cpp create mode 100644 src/SparseBindingTest.h diff --git a/src/SparseBindingTest.cpp b/src/SparseBindingTest.cpp new file mode 100644 index 0000000..26a1bfb --- /dev/null +++ b/src/SparseBindingTest.cpp @@ -0,0 +1,248 @@ +#include "Common.h" +#include "SparseBindingTest.h" + +#ifdef _WIN32 + +//////////////////////////////////////////////////////////////////////////////// +// External imports + +extern VkDevice g_hDevice; +extern VmaAllocator g_hAllocator; +extern uint32_t g_FrameIndex; +extern bool g_SparseBindingEnabled; +extern VkQueue g_hSparseBindingQueue; +extern VkFence g_ImmediateFence; + +void SaveAllocatorStatsToFile(const wchar_t* filePath); + +//////////////////////////////////////////////////////////////////////////////// +// Class definitions + +class BaseImage +{ +public: + virtual VkResult Init(RandomNumberGenerator& rand) = 0; + virtual ~BaseImage(); + +protected: + VkImage m_Image = VK_NULL_HANDLE; + + void FillImageCreateInfo(VkImageCreateInfo& outInfo, RandomNumberGenerator& rand); +}; + +class TraditionalImage : public BaseImage +{ +public: + virtual VkResult Init(RandomNumberGenerator& rand); + virtual ~TraditionalImage(); + +private: + VmaAllocation m_Allocation = VK_NULL_HANDLE; +}; + +class SparseBindingImage : public BaseImage +{ +public: + virtual VkResult Init(RandomNumberGenerator& rand); + virtual ~SparseBindingImage(); + +private: + std::vector m_Allocations; +}; + +//////////////////////////////////////////////////////////////////////////////// +// class BaseImage + +BaseImage::~BaseImage() +{ 
+ if(m_Image) + { + vkDestroyImage(g_hDevice, m_Image, nullptr); + } +} + +void BaseImage::FillImageCreateInfo(VkImageCreateInfo& outInfo, RandomNumberGenerator& rand) +{ + constexpr uint32_t imageSizeMin = 8; + constexpr uint32_t imageSizeMax = 2048; + + ZeroMemory(&outInfo, sizeof(outInfo)); + outInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; + outInfo.imageType = VK_IMAGE_TYPE_2D; + outInfo.extent.width = rand.Generate() % (imageSizeMax - imageSizeMin) + imageSizeMin; + outInfo.extent.height = rand.Generate() % (imageSizeMax - imageSizeMin) + imageSizeMin; + outInfo.extent.depth = 1; + outInfo.mipLevels = 1; // TODO ? + outInfo.arrayLayers = 1; // TODO ? + outInfo.format = VK_FORMAT_R8G8B8A8_UNORM; + outInfo.tiling = VK_IMAGE_TILING_OPTIMAL; + outInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; + outInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; + outInfo.samples = VK_SAMPLE_COUNT_1_BIT; + outInfo.flags = 0; +} + +//////////////////////////////////////////////////////////////////////////////// +// class TraditionalImage + +VkResult TraditionalImage::Init(RandomNumberGenerator& rand) +{ + VkImageCreateInfo imageCreateInfo; + FillImageCreateInfo(imageCreateInfo, rand); + + VmaAllocationCreateInfo allocCreateInfo = {}; + allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY; + // Default BEST_FIT is clearly better. + //allocCreateInfo.flags |= VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT; + + const VkResult res = vmaCreateImage(g_hAllocator, &imageCreateInfo, &allocCreateInfo, + &m_Image, &m_Allocation, nullptr); + + return res; +} + +TraditionalImage::~TraditionalImage() +{ + if(m_Allocation) + { + vmaFreeMemory(g_hAllocator, m_Allocation); + } +} + +//////////////////////////////////////////////////////////////////////////////// +// class SparseBindingImage + +VkResult SparseBindingImage::Init(RandomNumberGenerator& rand) +{ + assert(g_SparseBindingEnabled && g_hSparseBindingQueue); + + // Create image. 
+ VkImageCreateInfo imageCreateInfo; + FillImageCreateInfo(imageCreateInfo, rand); + imageCreateInfo.flags |= VK_IMAGE_CREATE_SPARSE_BINDING_BIT; + VkResult res = vkCreateImage(g_hDevice, &imageCreateInfo, nullptr, &m_Image); + if(res != VK_SUCCESS) + { + return res; + } + + // Get memory requirements. + VkMemoryRequirements imageMemReq; + vkGetImageMemoryRequirements(g_hDevice, m_Image, &imageMemReq); + + // This is just to silence validation layer warning. + // But it doesn't help. Looks like a bug in Vulkan validation layers. + uint32_t sparseMemReqCount = 0; + vkGetImageSparseMemoryRequirements(g_hDevice, m_Image, &sparseMemReqCount, nullptr); + assert(sparseMemReqCount <= 8); + VkSparseImageMemoryRequirements sparseMemReq[8]; + vkGetImageSparseMemoryRequirements(g_hDevice, m_Image, &sparseMemReqCount, sparseMemReq); + + // According to Vulkan specification, for sparse resources memReq.alignment is also page size. + const VkDeviceSize pageSize = imageMemReq.alignment; + const uint32_t pageCount = (uint32_t)ceil_div(imageMemReq.size, pageSize); + + VmaAllocationCreateInfo allocCreateInfo = {}; + allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY; + + VkMemoryRequirements pageMemReq = imageMemReq; + pageMemReq.size = pageSize; + + // Allocate and bind memory pages. 
+ m_Allocations.resize(pageCount); + std::fill(m_Allocations.begin(), m_Allocations.end(), nullptr); + std::vector binds{pageCount}; + VmaAllocationInfo allocInfo; + for(uint32_t i = 0; i < pageCount; ++i) + { + res = vmaAllocateMemory(g_hAllocator, &pageMemReq, &allocCreateInfo, &m_Allocations[i], &allocInfo); + if(res != VK_SUCCESS) + { + return res; + } + + binds[i] = {}; + binds[i].resourceOffset = pageSize * i; + binds[i].size = pageSize; + binds[i].memory = allocInfo.deviceMemory; + binds[i].memoryOffset = allocInfo.offset; + } + + VkSparseImageOpaqueMemoryBindInfo imageBindInfo; + imageBindInfo.image = m_Image; + imageBindInfo.bindCount = pageCount; + imageBindInfo.pBinds = binds.data(); + + VkBindSparseInfo bindSparseInfo = { VK_STRUCTURE_TYPE_BIND_SPARSE_INFO }; + bindSparseInfo.pImageOpaqueBinds = &imageBindInfo; + bindSparseInfo.imageOpaqueBindCount = 1; + + ERR_GUARD_VULKAN( vkResetFences(g_hDevice, 1, &g_ImmediateFence) ); + ERR_GUARD_VULKAN( vkQueueBindSparse(g_hSparseBindingQueue, 1, &bindSparseInfo, g_ImmediateFence) ); + ERR_GUARD_VULKAN( vkWaitForFences(g_hDevice, 1, &g_ImmediateFence, VK_TRUE, UINT64_MAX) ); + + return VK_SUCCESS; +} + +SparseBindingImage::~SparseBindingImage() +{ + for(size_t i = m_Allocations.size(); i--; ) + { + vmaFreeMemory(g_hAllocator, m_Allocations[i]); + } +} + +//////////////////////////////////////////////////////////////////////////////// +// Private functions + +//////////////////////////////////////////////////////////////////////////////// +// Public functions + +void TestSparseBinding() +{ + struct ImageInfo + { + std::unique_ptr image; + uint32_t endFrame; + }; + std::vector images; + + constexpr uint32_t frameCount = 2000; + constexpr uint32_t imageLifeFramesMin = 1; + constexpr uint32_t imageLifeFramesMax = 400; + + RandomNumberGenerator rand(4652467); + + for(uint32_t i = 0; i < frameCount; ++i) + { + // Bump frame index. 
+ ++g_FrameIndex; + vmaSetCurrentFrameIndex(g_hAllocator, g_FrameIndex); + + // Create one new, random image. + ImageInfo imageInfo; + //imageInfo.image = std::make_unique(); + imageInfo.image = std::make_unique(); + if(imageInfo.image->Init(rand) == VK_SUCCESS) + { + imageInfo.endFrame = g_FrameIndex + rand.Generate() % (imageLifeFramesMax - imageLifeFramesMin) + imageLifeFramesMin; + images.push_back(std::move(imageInfo)); + } + + // Delete all images that expired. + for(size_t i = images.size(); i--; ) + { + if(g_FrameIndex >= images[i].endFrame) + { + images.erase(images.begin() + i); + } + } + } + + SaveAllocatorStatsToFile(L"SparseBindingTest.json"); + + // Free remaining images. + images.clear(); +} + +#endif // #ifdef _WIN32 diff --git a/src/SparseBindingTest.h b/src/SparseBindingTest.h new file mode 100644 index 0000000..8637c9c --- /dev/null +++ b/src/SparseBindingTest.h @@ -0,0 +1,7 @@ +#pragma once + +#ifdef _WIN32 + +void TestSparseBinding(); + +#endif // #ifdef _WIN32 diff --git a/src/Tests.cpp b/src/Tests.cpp index 30b8bf9..0940573 100644 --- a/src/Tests.cpp +++ b/src/Tests.cpp @@ -139,7 +139,7 @@ struct PoolTestResult static const uint32_t IMAGE_BYTES_PER_PIXEL = 1; -static uint32_t g_FrameIndex = 0; +uint32_t g_FrameIndex = 0; struct BufferInfo { @@ -635,7 +635,7 @@ VkResult MainTest(Result& outResult, const Config& config) return res; } -static void SaveAllocatorStatsToFile(const wchar_t* filePath) +void SaveAllocatorStatsToFile(const wchar_t* filePath) { char* stats; vmaBuildStatsString(g_hAllocator, &stats, VK_TRUE); diff --git a/src/VulkanSample.cpp b/src/VulkanSample.cpp index d33861e..b606433 100644 --- a/src/VulkanSample.cpp +++ b/src/VulkanSample.cpp @@ -22,6 +22,7 @@ #ifdef _WIN32 +#include "SparseBindingTest.h" #include "Tests.h" #include "VmaUsage.h" #include "Common.h" @@ -46,6 +47,7 @@ bool g_MemoryAliasingWarningEnabled = true; static bool g_EnableValidationLayer = true; static bool VK_KHR_get_memory_requirements2_enabled = false; 
static bool VK_KHR_dedicated_allocation_enabled = false; +bool g_SparseBindingEnabled = false; static HINSTANCE g_hAppInstance; static HWND g_hWnd; @@ -62,11 +64,13 @@ static std::vector g_Framebuffers; static VkCommandPool g_hCommandPool; static VkCommandBuffer g_MainCommandBuffers[COMMAND_BUFFER_COUNT]; static VkFence g_MainCommandBufferExecutedFances[COMMAND_BUFFER_COUNT]; +VkFence g_ImmediateFence; static uint32_t g_NextCommandBufferIndex; static VkSemaphore g_hImageAvailableSemaphore; static VkSemaphore g_hRenderFinishedSemaphore; static uint32_t g_GraphicsQueueFamilyIndex = UINT_MAX; static uint32_t g_PresentQueueFamilyIndex = UINT_MAX; +static uint32_t g_SparseBindingQueueFamilyIndex = UINT_MAX; static VkDescriptorSetLayout g_hDescriptorSetLayout; static VkDescriptorPool g_hDescriptorPool; static VkDescriptorSet g_hDescriptorSet; // Automatically destroyed with m_DescriptorPool. @@ -86,6 +90,7 @@ static PFN_vkDestroyDebugReportCallbackEXT g_pvkDestroyDebugReportCallbackEXT; static VkDebugReportCallbackEXT g_hCallback; static VkQueue g_hGraphicsQueue; +VkQueue g_hSparseBindingQueue; static VkCommandBuffer g_hTemporaryCommandBuffer; static VkPipelineLayout g_hPipelineLayout; @@ -1196,8 +1201,10 @@ static void InitializeApplication() VkPhysicalDeviceProperties physicalDeviceProperties = {}; vkGetPhysicalDeviceProperties(g_hPhysicalDevice, &physicalDeviceProperties); - //VkPhysicalDeviceFeatures physicalDeviceFreatures = {}; - //vkGetPhysicalDeviceFeatures(g_PhysicalDevice, &physicalDeviceFreatures); + VkPhysicalDeviceFeatures physicalDeviceFeatures = {}; + vkGetPhysicalDeviceFeatures(g_hPhysicalDevice, &physicalDeviceFeatures); + + g_SparseBindingEnabled = physicalDeviceFeatures.sparseBinding != 0; // Find queue family index @@ -1208,7 +1215,9 @@ static void InitializeApplication() vkGetPhysicalDeviceQueueFamilyProperties(g_hPhysicalDevice, &queueFamilyCount, queueFamilies.data()); for(uint32_t i = 0; (i < queueFamilyCount) && - (g_GraphicsQueueFamilyIndex == 
UINT_MAX || g_PresentQueueFamilyIndex == UINT_MAX); + (g_GraphicsQueueFamilyIndex == UINT_MAX || + g_PresentQueueFamilyIndex == UINT_MAX || + (g_SparseBindingEnabled && g_SparseBindingQueueFamilyIndex == UINT_MAX)); ++i) { if(queueFamilies[i].queueCount > 0) @@ -1225,26 +1234,56 @@ static void InitializeApplication() { g_PresentQueueFamilyIndex = i; } + + if(g_SparseBindingEnabled && + g_SparseBindingQueueFamilyIndex == UINT32_MAX && + (queueFamilies[i].queueFlags & VK_QUEUE_SPARSE_BINDING_BIT) != 0) + { + g_SparseBindingQueueFamilyIndex = i; + } } } assert(g_GraphicsQueueFamilyIndex != UINT_MAX); + g_SparseBindingEnabled = g_SparseBindingEnabled && g_SparseBindingQueueFamilyIndex != UINT32_MAX; + // Create logical device const float queuePriority = 1.f; - VkDeviceQueueCreateInfo deviceQueueCreateInfo[2] = { VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO }; - deviceQueueCreateInfo[0].queueFamilyIndex = g_GraphicsQueueFamilyIndex; - deviceQueueCreateInfo[0].queueCount = 1; - deviceQueueCreateInfo[0].pQueuePriorities = &queuePriority; - deviceQueueCreateInfo[1].sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO; - deviceQueueCreateInfo[1].queueFamilyIndex = g_PresentQueueFamilyIndex; - deviceQueueCreateInfo[1].queueCount = 1; - deviceQueueCreateInfo[1].pQueuePriorities = &queuePriority; + VkDeviceQueueCreateInfo queueCreateInfo[3] = {}; + uint32_t queueCount = 1; + queueCreateInfo[0].sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO; + queueCreateInfo[0].queueFamilyIndex = g_GraphicsQueueFamilyIndex; + queueCreateInfo[0].queueCount = 1; + queueCreateInfo[0].pQueuePriorities = &queuePriority; + + if(g_PresentQueueFamilyIndex != g_GraphicsQueueFamilyIndex) + { + + queueCreateInfo[queueCount].sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO; + queueCreateInfo[queueCount].queueFamilyIndex = g_PresentQueueFamilyIndex; + queueCreateInfo[queueCount].queueCount = 1; + queueCreateInfo[queueCount].pQueuePriorities = &queuePriority; + ++queueCount; + } + + 
if(g_SparseBindingEnabled && + g_SparseBindingQueueFamilyIndex != g_GraphicsQueueFamilyIndex && + g_SparseBindingQueueFamilyIndex != g_PresentQueueFamilyIndex) + { + + queueCreateInfo[queueCount].sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO; + queueCreateInfo[queueCount].queueFamilyIndex = g_SparseBindingQueueFamilyIndex; + queueCreateInfo[queueCount].queueCount = 1; + queueCreateInfo[queueCount].pQueuePriorities = &queuePriority; + ++queueCount; + } VkPhysicalDeviceFeatures deviceFeatures = {}; - deviceFeatures.fillModeNonSolid = VK_TRUE; + //deviceFeatures.fillModeNonSolid = VK_TRUE; deviceFeatures.samplerAnisotropy = VK_TRUE; + deviceFeatures.sparseBinding = g_SparseBindingEnabled ? VK_TRUE : VK_FALSE; // Determine list of device extensions to enable. std::vector enabledDeviceExtensions; @@ -1279,8 +1318,8 @@ static void InitializeApplication() deviceCreateInfo.ppEnabledLayerNames = nullptr; deviceCreateInfo.enabledExtensionCount = (uint32_t)enabledDeviceExtensions.size(); deviceCreateInfo.ppEnabledExtensionNames = !enabledDeviceExtensions.empty() ? enabledDeviceExtensions.data() : nullptr; - deviceCreateInfo.queueCreateInfoCount = g_PresentQueueFamilyIndex != g_GraphicsQueueFamilyIndex ? 2 : 1; - deviceCreateInfo.pQueueCreateInfos = deviceQueueCreateInfo; + deviceCreateInfo.queueCreateInfoCount = queueCount; + deviceCreateInfo.pQueueCreateInfos = queueCreateInfo; deviceCreateInfo.pEnabledFeatures = &deviceFeatures; ERR_GUARD_VULKAN( vkCreateDevice(g_hPhysicalDevice, &deviceCreateInfo, nullptr, &g_hDevice) ); @@ -1308,13 +1347,19 @@ static void InitializeApplication() ERR_GUARD_VULKAN( vmaCreateAllocator(&allocatorInfo, &g_hAllocator) ); - // Retrieve queue (doesn't need to be destroyed) + // Retrieve queues (don't need to be destroyed). 
vkGetDeviceQueue(g_hDevice, g_GraphicsQueueFamilyIndex, 0, &g_hGraphicsQueue); vkGetDeviceQueue(g_hDevice, g_PresentQueueFamilyIndex, 0, &g_hPresentQueue); assert(g_hGraphicsQueue); assert(g_hPresentQueue); + if(g_SparseBindingEnabled) + { + vkGetDeviceQueue(g_hDevice, g_SparseBindingQueueFamilyIndex, 0, &g_hSparseBindingQueue); + assert(g_hSparseBindingQueue); + } + // Create command pool VkCommandPoolCreateInfo commandPoolInfo = { VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO }; @@ -1335,6 +1380,8 @@ static void InitializeApplication() ERR_GUARD_VULKAN( vkCreateFence(g_hDevice, &fenceInfo, nullptr, &g_MainCommandBufferExecutedFances[i]) ); } + ERR_GUARD_VULKAN( vkCreateFence(g_hDevice, &fenceInfo, nullptr, &g_ImmediateFence) ); + commandBufferInfo.commandBufferCount = 1; ERR_GUARD_VULKAN( vkAllocateCommandBuffers(g_hDevice, &commandBufferInfo, &g_hTemporaryCommandBuffer) ); @@ -1460,6 +1507,12 @@ static void FinalizeApplication() g_hSampler = VK_NULL_HANDLE; } + if(g_ImmediateFence) + { + vkDestroyFence(g_hDevice, g_ImmediateFence, nullptr); + g_ImmediateFence = VK_NULL_HANDLE; + } + for(size_t i = COMMAND_BUFFER_COUNT; i--; ) { if(g_MainCommandBufferExecutedFances[i] != VK_NULL_HANDLE) @@ -1716,6 +1769,9 @@ static LRESULT WINAPI WndProc(HWND hWnd, UINT msg, WPARAM wParam, LPARAM lParam) case 'T': Test(); break; + case 'S': + TestSparseBinding(); + break; } return 0; From d062b784d3cb4a37842324df6d912ffa60fb7dd6 Mon Sep 17 00:00:00 2001 From: Adam Sawicki Date: Wed, 3 Oct 2018 15:26:22 +0200 Subject: [PATCH 02/10] Added functions: vmaAllocateMemoryPages, vmaFreeMemoryPages to create and destroy multiple allocations at once. 
--- docs/html/globals.html | 8 +- docs/html/globals_func.html | 6 + docs/html/search/all_10.js | 2 + docs/html/search/functions_0.js | 2 + docs/html/vk__mem__alloc_8h.html | 118 ++++++- docs/html/vk__mem__alloc_8h_source.html | 20 +- src/SparseBindingTest.cpp | 53 +-- src/Tests.cpp | 4 +- src/vk_mem_alloc.h | 425 ++++++++++++++++++++---- 9 files changed, 527 insertions(+), 111 deletions(-) diff --git a/docs/html/globals.html b/docs/html/globals.html index e3c81b7..5b2d293 100644 --- a/docs/html/globals.html +++ b/docs/html/globals.html @@ -192,6 +192,9 @@ $(function() {
  • vmaAllocateMemoryForImage() : vk_mem_alloc.h
  • +
  • vmaAllocateMemoryPages() +: vk_mem_alloc.h +
  • VmaAllocationCreateFlagBits : vk_mem_alloc.h
  • @@ -285,6 +288,9 @@ $(function() {
  • vmaFreeMemory() : vk_mem_alloc.h
  • +
  • vmaFreeMemoryPages() +: vk_mem_alloc.h +
  • vmaFreeStatsString() : vk_mem_alloc.h
  • @@ -328,7 +334,7 @@ $(function() { : vk_mem_alloc.h
  • VmaRecordFlagBits -: vk_mem_alloc.h +: vk_mem_alloc.h
  • VmaRecordFlags : vk_mem_alloc.h diff --git a/docs/html/globals_func.html b/docs/html/globals_func.html index bcad79b..36c7782 100644 --- a/docs/html/globals_func.html +++ b/docs/html/globals_func.html @@ -73,6 +73,9 @@ $(function() {
  • vmaAllocateMemoryForImage() : vk_mem_alloc.h
  • +
  • vmaAllocateMemoryPages() +: vk_mem_alloc.h +
  • vmaBindBufferMemory() : vk_mem_alloc.h
  • @@ -136,6 +139,9 @@ $(function() {
  • vmaFreeMemory() : vk_mem_alloc.h
  • +
  • vmaFreeMemoryPages() +: vk_mem_alloc.h +
  • vmaFreeStatsString() : vk_mem_alloc.h
  • diff --git a/docs/html/search/all_10.js b/docs/html/search/all_10.js index f2abd6d..fea5d80 100644 --- a/docs/html/search/all_10.js +++ b/docs/html/search/all_10.js @@ -56,6 +56,7 @@ var searchData= ['vmaallocatememory',['vmaAllocateMemory',['../vk__mem__alloc_8h.html#abf28077dbf82d0908b8acbe8ee8dd9b8',1,'vk_mem_alloc.h']]], ['vmaallocatememoryforbuffer',['vmaAllocateMemoryForBuffer',['../vk__mem__alloc_8h.html#a7fdf64415b6c3d83c454f28d2c53df7b',1,'vk_mem_alloc.h']]], ['vmaallocatememoryforimage',['vmaAllocateMemoryForImage',['../vk__mem__alloc_8h.html#a0faa3f9e5fb233d29d1e00390650febb',1,'vk_mem_alloc.h']]], + ['vmaallocatememorypages',['vmaAllocateMemoryPages',['../vk__mem__alloc_8h.html#ad37e82e492b3de38fc3f4cffd9ad0ae1',1,'vk_mem_alloc.h']]], ['vmaallocation',['VmaAllocation',['../struct_vma_allocation.html',1,'']]], ['vmaallocationcreateflagbits',['VmaAllocationCreateFlagBits',['../vk__mem__alloc_8h.html#ad9889c10c798b040d59c92f257cae597',1,'VmaAllocationCreateFlagBits(): vk_mem_alloc.h'],['../vk__mem__alloc_8h.html#abf6bf6748c7a9fe7ce5b7835c0f56af4',1,'VmaAllocationCreateFlagBits(): vk_mem_alloc.h']]], ['vmaallocationcreateflags',['VmaAllocationCreateFlags',['../vk__mem__alloc_8h.html#a5225e5e11f8376f6a31a1791f3d6e817',1,'vk_mem_alloc.h']]], @@ -89,6 +90,7 @@ var searchData= ['vmafindmemorytypeindexforimageinfo',['vmaFindMemoryTypeIndexForImageInfo',['../vk__mem__alloc_8h.html#a088da83d8eaf3ce9056d9ea0b981d472',1,'vk_mem_alloc.h']]], ['vmaflushallocation',['vmaFlushAllocation',['../vk__mem__alloc_8h.html#abc34ee6f021f459aff885f3758c435de',1,'vk_mem_alloc.h']]], ['vmafreememory',['vmaFreeMemory',['../vk__mem__alloc_8h.html#a11f0fbc034fa81a4efedd73d61ce7568',1,'vk_mem_alloc.h']]], + ['vmafreememorypages',['vmaFreeMemoryPages',['../vk__mem__alloc_8h.html#ab9e709de044c5d8476bea77a4e755840',1,'vk_mem_alloc.h']]], ['vmafreestatsstring',['vmaFreeStatsString',['../vk__mem__alloc_8h.html#a3104eb30d8122c84dd8541063f145288',1,'vk_mem_alloc.h']]], 
['vmagetallocationinfo',['vmaGetAllocationInfo',['../vk__mem__alloc_8h.html#a86dd08aba8633bfa4ad0df2e76481d8b',1,'vk_mem_alloc.h']]], ['vmagetmemoryproperties',['vmaGetMemoryProperties',['../vk__mem__alloc_8h.html#ab88db292a17974f911182543fda52d19',1,'vk_mem_alloc.h']]], diff --git a/docs/html/search/functions_0.js b/docs/html/search/functions_0.js index 0ab7deb..53a1d34 100644 --- a/docs/html/search/functions_0.js +++ b/docs/html/search/functions_0.js @@ -3,6 +3,7 @@ var searchData= ['vmaallocatememory',['vmaAllocateMemory',['../vk__mem__alloc_8h.html#abf28077dbf82d0908b8acbe8ee8dd9b8',1,'vk_mem_alloc.h']]], ['vmaallocatememoryforbuffer',['vmaAllocateMemoryForBuffer',['../vk__mem__alloc_8h.html#a7fdf64415b6c3d83c454f28d2c53df7b',1,'vk_mem_alloc.h']]], ['vmaallocatememoryforimage',['vmaAllocateMemoryForImage',['../vk__mem__alloc_8h.html#a0faa3f9e5fb233d29d1e00390650febb',1,'vk_mem_alloc.h']]], + ['vmaallocatememorypages',['vmaAllocateMemoryPages',['../vk__mem__alloc_8h.html#ad37e82e492b3de38fc3f4cffd9ad0ae1',1,'vk_mem_alloc.h']]], ['vmabindbuffermemory',['vmaBindBufferMemory',['../vk__mem__alloc_8h.html#a6b0929b914b60cf2d45cac4bf3547470',1,'vk_mem_alloc.h']]], ['vmabindimagememory',['vmaBindImageMemory',['../vk__mem__alloc_8h.html#a3d3ca45799923aa5d138e9e5f9eb2da5',1,'vk_mem_alloc.h']]], ['vmabuildstatsstring',['vmaBuildStatsString',['../vk__mem__alloc_8h.html#aa4fee7eb5253377599ef4fd38c93c2a0',1,'vk_mem_alloc.h']]], @@ -24,6 +25,7 @@ var searchData= ['vmafindmemorytypeindexforimageinfo',['vmaFindMemoryTypeIndexForImageInfo',['../vk__mem__alloc_8h.html#a088da83d8eaf3ce9056d9ea0b981d472',1,'vk_mem_alloc.h']]], ['vmaflushallocation',['vmaFlushAllocation',['../vk__mem__alloc_8h.html#abc34ee6f021f459aff885f3758c435de',1,'vk_mem_alloc.h']]], ['vmafreememory',['vmaFreeMemory',['../vk__mem__alloc_8h.html#a11f0fbc034fa81a4efedd73d61ce7568',1,'vk_mem_alloc.h']]], + 
['vmafreememorypages',['vmaFreeMemoryPages',['../vk__mem__alloc_8h.html#ab9e709de044c5d8476bea77a4e755840',1,'vk_mem_alloc.h']]], ['vmafreestatsstring',['vmaFreeStatsString',['../vk__mem__alloc_8h.html#a3104eb30d8122c84dd8541063f145288',1,'vk_mem_alloc.h']]], ['vmagetallocationinfo',['vmaGetAllocationInfo',['../vk__mem__alloc_8h.html#a86dd08aba8633bfa4ad0df2e76481d8b',1,'vk_mem_alloc.h']]], ['vmagetmemoryproperties',['vmaGetMemoryProperties',['../vk__mem__alloc_8h.html#ab88db292a17974f911182543fda52d19',1,'vk_mem_alloc.h']]], diff --git a/docs/html/vk__mem__alloc_8h.html b/docs/html/vk__mem__alloc_8h.html index 86a5da9..6c3409a 100644 --- a/docs/html/vk__mem__alloc_8h.html +++ b/docs/html/vk__mem__alloc_8h.html @@ -302,6 +302,9 @@ Functions VkResult vmaAllocateMemory (VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)  General purpose memory allocation. More...
      +VkResult vmaAllocateMemoryPages (VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, size_t allocationCount, VmaAllocation *pAllocations, VmaAllocationInfo *pAllocationInfo) + General purpose memory allocation for multiple allocation objects at once. More...
    +  VkResult vmaAllocateMemoryForBuffer (VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)   VkResult vmaAllocateMemoryForImage (VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo) @@ -310,6 +313,9 @@ Functions void vmaFreeMemory (VmaAllocator allocator, VmaAllocation allocation)  Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage(). More...
      +void vmaFreeMemoryPages (VmaAllocator allocator, size_t allocationCount, VmaAllocation *pAllocations) + Frees memory and destroys multiple allocations. More...
    +  void vmaGetAllocationInfo (VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)  Returns current information about specified allocation and atomically marks it as used in current frame. More...
      @@ -1022,7 +1028,7 @@ Functions -

    You should free the memory using vmaFreeMemory().

    +

    You should free the memory using vmaFreeMemory() or vmaFreeMemoryPages().

    It is recommended to use vmaAllocateMemoryForBuffer(), vmaAllocateMemoryForImage(), vmaCreateBuffer(), vmaCreateImage() instead whenever possible.

    @@ -1127,6 +1133,76 @@ Functions

    Function similar to vmaAllocateMemoryForBuffer().

    + + + +

    ◆ vmaAllocateMemoryPages()

    + +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    VkResult vmaAllocateMemoryPages (VmaAllocator allocator,
    const VkMemoryRequirements * pVkMemoryRequirements,
    const VmaAllocationCreateInfopCreateInfo,
    size_t allocationCount,
    VmaAllocationpAllocations,
    VmaAllocationInfopAllocationInfo 
    )
    +
    + +

    General purpose memory allocation for multiple allocation objects at once.

    +
    Parameters
    + + + + + + + +
    allocatorAllocator object.
    pVkMemoryRequirementsMemory requirements for each allocation.
    pCreateInfoCreation parameters for each allocation.
    allocationCountNumber of allocations to make.
    [out]pAllocationsPointer to array that will be filled with handles to created allocations.
    [out]pAllocationInfoOptional. Pointer to array that will be filled with parameters of created allocations.
    +
    +
    +

    You should free the memory using vmaFreeMemory() or vmaFreeMemoryPages().

    +

    Word "pages" is just a suggestion to use this function to allocate pieces of memory needed for sparse binding. It is just a general purpose allocation function able to make multiple allocations at once. It may be internally optimized to be more efficient than calling vmaAllocateMemory() allocationCount times.

    +

    All allocations are made using the same parameters. All of them are created out of the same memory pool and type. If any allocation fails, all allocations already made within this function call are also freed, so that when returned result is not VK_SUCCESS, pAllocations array is always entirely filled with VK_NULL_HANDLE.

    +

    TODO Also write tests for it.

    +

    TODO also write test for allocation that will partially fail.

    +
    @@ -2016,6 +2092,46 @@ Functions

    Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().

    +

    Passing VK_NULL_HANDLE as allocation is valid. Such function call is just skipped.

    + +
    + + +

    ◆ vmaFreeMemoryPages()

    + +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + +
    void vmaFreeMemoryPages (VmaAllocator allocator,
    size_t allocationCount,
    VmaAllocationpAllocations 
    )
    +
    + +

    Frees memory and destroys multiple allocations.

    +

    Word "pages" is just a suggestion to use this function to free pieces of memory used for sparse binding. It is just a general purpose function to free memory and destroy allocations made using e.g. vmaAllocateMemory(), vmaAllocateMemoryPages() and other functions. It may be internally optimized to be more efficient than calling vmaFreeMemory() allocationCount times.

    +

    Allocations in pAllocations array can come from any memory pools and types. Passing VK_NULL_HANDLE as elements of pAllocations array is valid. Such entries are just skipped.

    +

    TODO Also write tests for it.

    diff --git a/docs/html/vk__mem__alloc_8h_source.html b/docs/html/vk__mem__alloc_8h_source.html index 31faa2f..67960d3 100644 --- a/docs/html/vk__mem__alloc_8h_source.html +++ b/docs/html/vk__mem__alloc_8h_source.html @@ -65,7 +65,7 @@ $(function() {
    vk_mem_alloc.h
    -Go to the documentation of this file.
    1 //
    2 // Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
    3 //
    4 // Permission is hereby granted, free of charge, to any person obtaining a copy
    5 // of this software and associated documentation files (the "Software"), to deal
    6 // in the Software without restriction, including without limitation the rights
    7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    8 // copies of the Software, and to permit persons to whom the Software is
    9 // furnished to do so, subject to the following conditions:
    10 //
    11 // The above copyright notice and this permission notice shall be included in
    12 // all copies or substantial portions of the Software.
    13 //
    14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    20 // THE SOFTWARE.
    21 //
    22 
    23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
    24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
    25 
    26 #ifdef __cplusplus
    27 extern "C" {
    28 #endif
    29 
    1460 /*
    1461 Define this macro to 0/1 to disable/enable support for recording functionality,
    1462 available through VmaAllocatorCreateInfo::pRecordSettings.
    1463 */
    1464 #ifndef VMA_RECORDING_ENABLED
    1465  #ifdef _WIN32
    1466  #define VMA_RECORDING_ENABLED 1
    1467  #else
    1468  #define VMA_RECORDING_ENABLED 0
    1469  #endif
    1470 #endif
    1471 
    1472 #ifndef NOMINMAX
    1473  #define NOMINMAX // For windows.h
    1474 #endif
    1475 
    1476 #include <vulkan/vulkan.h>
    1477 
    1478 #if VMA_RECORDING_ENABLED
    1479  #include <windows.h>
    1480 #endif
    1481 
    1482 #if !defined(VMA_DEDICATED_ALLOCATION)
    1483  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
    1484  #define VMA_DEDICATED_ALLOCATION 1
    1485  #else
    1486  #define VMA_DEDICATED_ALLOCATION 0
    1487  #endif
    1488 #endif
    1489 
    1499 VK_DEFINE_HANDLE(VmaAllocator)
    1500 
    1501 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    1503  VmaAllocator allocator,
    1504  uint32_t memoryType,
    1505  VkDeviceMemory memory,
    1506  VkDeviceSize size);
    1508 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    1509  VmaAllocator allocator,
    1510  uint32_t memoryType,
    1511  VkDeviceMemory memory,
    1512  VkDeviceSize size);
    1513 
    1527 
    1557 
    1560 typedef VkFlags VmaAllocatorCreateFlags;
    1561 
    1566 typedef struct VmaVulkanFunctions {
    1567  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    1568  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    1569  PFN_vkAllocateMemory vkAllocateMemory;
    1570  PFN_vkFreeMemory vkFreeMemory;
    1571  PFN_vkMapMemory vkMapMemory;
    1572  PFN_vkUnmapMemory vkUnmapMemory;
    1573  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
    1574  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
    1575  PFN_vkBindBufferMemory vkBindBufferMemory;
    1576  PFN_vkBindImageMemory vkBindImageMemory;
    1577  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    1578  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    1579  PFN_vkCreateBuffer vkCreateBuffer;
    1580  PFN_vkDestroyBuffer vkDestroyBuffer;
    1581  PFN_vkCreateImage vkCreateImage;
    1582  PFN_vkDestroyImage vkDestroyImage;
    1583 #if VMA_DEDICATED_ALLOCATION
    1584  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    1585  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
    1586 #endif
    1588 
    1590 typedef enum VmaRecordFlagBits {
    1597 
    1600 typedef VkFlags VmaRecordFlags;
    1601 
    1603 typedef struct VmaRecordSettings
    1604 {
    1614  const char* pFilePath;
    1616 
    1619 {
    1623 
    1624  VkPhysicalDevice physicalDevice;
    1626 
    1627  VkDevice device;
    1629 
    1632 
    1633  const VkAllocationCallbacks* pAllocationCallbacks;
    1635 
    1674  const VkDeviceSize* pHeapSizeLimit;
    1695 
    1697 VkResult vmaCreateAllocator(
    1698  const VmaAllocatorCreateInfo* pCreateInfo,
    1699  VmaAllocator* pAllocator);
    1700 
    1702 void vmaDestroyAllocator(
    1703  VmaAllocator allocator);
    1704 
    1710  VmaAllocator allocator,
    1711  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
    1712 
    1718  VmaAllocator allocator,
    1719  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
    1720 
    1728  VmaAllocator allocator,
    1729  uint32_t memoryTypeIndex,
    1730  VkMemoryPropertyFlags* pFlags);
    1731 
    1741  VmaAllocator allocator,
    1742  uint32_t frameIndex);
    1743 
    1746 typedef struct VmaStatInfo
    1747 {
    1749  uint32_t blockCount;
    1755  VkDeviceSize usedBytes;
    1757  VkDeviceSize unusedBytes;
    1760 } VmaStatInfo;
    1761 
    1763 typedef struct VmaStats
    1764 {
    1765  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    1766  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    1768 } VmaStats;
    1769 
    1771 void vmaCalculateStats(
    1772  VmaAllocator allocator,
    1773  VmaStats* pStats);
    1774 
    1775 #define VMA_STATS_STRING_ENABLED 1
    1776 
    1777 #if VMA_STATS_STRING_ENABLED
    1778 
    1780 
    1782 void vmaBuildStatsString(
    1783  VmaAllocator allocator,
    1784  char** ppStatsString,
    1785  VkBool32 detailedMap);
    1786 
    1787 void vmaFreeStatsString(
    1788  VmaAllocator allocator,
    1789  char* pStatsString);
    1790 
    1791 #endif // #if VMA_STATS_STRING_ENABLED
    1792 
    1801 VK_DEFINE_HANDLE(VmaPool)
    1802 
    1803 typedef enum VmaMemoryUsage
    1804 {
    1853 } VmaMemoryUsage;
    1854 
    1869 
    1924 
    1937 
    1947 
    1954 
    1958 
    1960 {
    1973  VkMemoryPropertyFlags requiredFlags;
    1978  VkMemoryPropertyFlags preferredFlags;
    1986  uint32_t memoryTypeBits;
    1999  void* pUserData;
    2001 
    2018 VkResult vmaFindMemoryTypeIndex(
    2019  VmaAllocator allocator,
    2020  uint32_t memoryTypeBits,
    2021  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2022  uint32_t* pMemoryTypeIndex);
    2023 
    2037  VmaAllocator allocator,
    2038  const VkBufferCreateInfo* pBufferCreateInfo,
    2039  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2040  uint32_t* pMemoryTypeIndex);
    2041 
    2055  VmaAllocator allocator,
    2056  const VkImageCreateInfo* pImageCreateInfo,
    2057  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2058  uint32_t* pMemoryTypeIndex);
    2059 
    2080 
    2097 
    2108 
    2114 
    2117 typedef VkFlags VmaPoolCreateFlags;
    2118 
    2121 typedef struct VmaPoolCreateInfo {
    2136  VkDeviceSize blockSize;
    2165 
    2168 typedef struct VmaPoolStats {
    2171  VkDeviceSize size;
    2174  VkDeviceSize unusedSize;
    2187  VkDeviceSize unusedRangeSizeMax;
    2190  size_t blockCount;
    2191 } VmaPoolStats;
    2192 
    2199 VkResult vmaCreatePool(
    2200  VmaAllocator allocator,
    2201  const VmaPoolCreateInfo* pCreateInfo,
    2202  VmaPool* pPool);
    2203 
    2206 void vmaDestroyPool(
    2207  VmaAllocator allocator,
    2208  VmaPool pool);
    2209 
    2216 void vmaGetPoolStats(
    2217  VmaAllocator allocator,
    2218  VmaPool pool,
    2219  VmaPoolStats* pPoolStats);
    2220 
    2228  VmaAllocator allocator,
    2229  VmaPool pool,
    2230  size_t* pLostAllocationCount);
    2231 
    2246 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
    2247 
    2272 VK_DEFINE_HANDLE(VmaAllocation)
    2273 
    2274 
    2276 typedef struct VmaAllocationInfo {
    2281  uint32_t memoryType;
    2290  VkDeviceMemory deviceMemory;
    2295  VkDeviceSize offset;
    2300  VkDeviceSize size;
    2314  void* pUserData;
    2316 
    2327 VkResult vmaAllocateMemory(
    2328  VmaAllocator allocator,
    2329  const VkMemoryRequirements* pVkMemoryRequirements,
    2330  const VmaAllocationCreateInfo* pCreateInfo,
    2331  VmaAllocation* pAllocation,
    2332  VmaAllocationInfo* pAllocationInfo);
    2333 
    2341  VmaAllocator allocator,
    2342  VkBuffer buffer,
    2343  const VmaAllocationCreateInfo* pCreateInfo,
    2344  VmaAllocation* pAllocation,
    2345  VmaAllocationInfo* pAllocationInfo);
    2346 
    2348 VkResult vmaAllocateMemoryForImage(
    2349  VmaAllocator allocator,
    2350  VkImage image,
    2351  const VmaAllocationCreateInfo* pCreateInfo,
    2352  VmaAllocation* pAllocation,
    2353  VmaAllocationInfo* pAllocationInfo);
    2354 
    2356 void vmaFreeMemory(
    2357  VmaAllocator allocator,
    2358  VmaAllocation allocation);
    2359 
    2377  VmaAllocator allocator,
    2378  VmaAllocation allocation,
    2379  VmaAllocationInfo* pAllocationInfo);
    2380 
    2395 VkBool32 vmaTouchAllocation(
    2396  VmaAllocator allocator,
    2397  VmaAllocation allocation);
    2398 
    2413  VmaAllocator allocator,
    2414  VmaAllocation allocation,
    2415  void* pUserData);
    2416 
    2428  VmaAllocator allocator,
    2429  VmaAllocation* pAllocation);
    2430 
    2465 VkResult vmaMapMemory(
    2466  VmaAllocator allocator,
    2467  VmaAllocation allocation,
    2468  void** ppData);
    2469 
    2474 void vmaUnmapMemory(
    2475  VmaAllocator allocator,
    2476  VmaAllocation allocation);
    2477 
    2490 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2491 
    2504 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2505 
    2522 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
    2523 
    2525 typedef struct VmaDefragmentationInfo {
    2530  VkDeviceSize maxBytesToMove;
    2537 
    2539 typedef struct VmaDefragmentationStats {
    2541  VkDeviceSize bytesMoved;
    2543  VkDeviceSize bytesFreed;
    2549 
    2588 VkResult vmaDefragment(
    2589  VmaAllocator allocator,
    2590  VmaAllocation* pAllocations,
    2591  size_t allocationCount,
    2592  VkBool32* pAllocationsChanged,
    2593  const VmaDefragmentationInfo *pDefragmentationInfo,
    2594  VmaDefragmentationStats* pDefragmentationStats);
    2595 
    2608 VkResult vmaBindBufferMemory(
    2609  VmaAllocator allocator,
    2610  VmaAllocation allocation,
    2611  VkBuffer buffer);
    2612 
    2625 VkResult vmaBindImageMemory(
    2626  VmaAllocator allocator,
    2627  VmaAllocation allocation,
    2628  VkImage image);
    2629 
    2656 VkResult vmaCreateBuffer(
    2657  VmaAllocator allocator,
    2658  const VkBufferCreateInfo* pBufferCreateInfo,
    2659  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2660  VkBuffer* pBuffer,
    2661  VmaAllocation* pAllocation,
    2662  VmaAllocationInfo* pAllocationInfo);
    2663 
    2675 void vmaDestroyBuffer(
    2676  VmaAllocator allocator,
    2677  VkBuffer buffer,
    2678  VmaAllocation allocation);
    2679 
    2681 VkResult vmaCreateImage(
    2682  VmaAllocator allocator,
    2683  const VkImageCreateInfo* pImageCreateInfo,
    2684  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2685  VkImage* pImage,
    2686  VmaAllocation* pAllocation,
    2687  VmaAllocationInfo* pAllocationInfo);
    2688 
    2700 void vmaDestroyImage(
    2701  VmaAllocator allocator,
    2702  VkImage image,
    2703  VmaAllocation allocation);
    2704 
    2705 #ifdef __cplusplus
    2706 }
    2707 #endif
    2708 
    2709 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
    2710 
    2711 // For Visual Studio IntelliSense.
    2712 #if defined(__cplusplus) && defined(__INTELLISENSE__)
    2713 #define VMA_IMPLEMENTATION
    2714 #endif
    2715 
    2716 #ifdef VMA_IMPLEMENTATION
    2717 #undef VMA_IMPLEMENTATION
    2718 
    2719 #include <cstdint>
    2720 #include <cstdlib>
    2721 #include <cstring>
    2722 
    2723 /*******************************************************************************
    2724 CONFIGURATION SECTION
    2725 
    2726 Define some of these macros before each #include of this header or change them
    2727 here if you need other then default behavior depending on your environment.
    2728 */
    2729 
    2730 /*
    2731 Define this macro to 1 to make the library fetch pointers to Vulkan functions
    2732 internally, like:
    2733 
    2734  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    2735 
    2736 Define to 0 if you are going to provide you own pointers to Vulkan functions via
    2737 VmaAllocatorCreateInfo::pVulkanFunctions.
    2738 */
    2739 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
    2740 #define VMA_STATIC_VULKAN_FUNCTIONS 1
    2741 #endif
    2742 
    2743 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
    2744 //#define VMA_USE_STL_CONTAINERS 1
    2745 
    2746 /* Set this macro to 1 to make the library including and using STL containers:
    2747 std::pair, std::vector, std::list, std::unordered_map.
    2748 
    2749 Set it to 0 or undefined to make the library using its own implementation of
    2750 the containers.
    2751 */
    2752 #if VMA_USE_STL_CONTAINERS
    2753  #define VMA_USE_STL_VECTOR 1
    2754  #define VMA_USE_STL_UNORDERED_MAP 1
    2755  #define VMA_USE_STL_LIST 1
    2756 #endif
    2757 
    2758 #if VMA_USE_STL_VECTOR
    2759  #include <vector>
    2760 #endif
    2761 
    2762 #if VMA_USE_STL_UNORDERED_MAP
    2763  #include <unordered_map>
    2764 #endif
    2765 
    2766 #if VMA_USE_STL_LIST
    2767  #include <list>
    2768 #endif
    2769 
    2770 /*
    2771 Following headers are used in this CONFIGURATION section only, so feel free to
    2772 remove them if not needed.
    2773 */
    2774 #include <cassert> // for assert
    2775 #include <algorithm> // for min, max
    2776 #include <mutex> // for std::mutex
    2777 #include <atomic> // for std::atomic
    2778 
    2779 #ifndef VMA_NULL
    2780  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    2781  #define VMA_NULL nullptr
    2782 #endif
    2783 
    2784 #if defined(__APPLE__) || defined(__ANDROID__)
    2785 #include <cstdlib>
    2786 void *aligned_alloc(size_t alignment, size_t size)
    2787 {
    2788  // alignment must be >= sizeof(void*)
    2789  if(alignment < sizeof(void*))
    2790  {
    2791  alignment = sizeof(void*);
    2792  }
    2793 
    2794  void *pointer;
    2795  if(posix_memalign(&pointer, alignment, size) == 0)
    2796  return pointer;
    2797  return VMA_NULL;
    2798 }
    2799 #endif
    2800 
    2801 // If your compiler is not compatible with C++11 and definition of
    2802 // aligned_alloc() function is missing, uncommeting following line may help:
    2803 
    2804 //#include <malloc.h>
    2805 
    2806 // Normal assert to check for programmer's errors, especially in Debug configuration.
    2807 #ifndef VMA_ASSERT
    2808  #ifdef _DEBUG
    2809  #define VMA_ASSERT(expr) assert(expr)
    2810  #else
    2811  #define VMA_ASSERT(expr)
    2812  #endif
    2813 #endif
    2814 
    2815 // Assert that will be called very often, like inside data structures e.g. operator[].
    2816 // Making it non-empty can make program slow.
    2817 #ifndef VMA_HEAVY_ASSERT
    2818  #ifdef _DEBUG
    2819  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    2820  #else
    2821  #define VMA_HEAVY_ASSERT(expr)
    2822  #endif
    2823 #endif
    2824 
    2825 #ifndef VMA_ALIGN_OF
    2826  #define VMA_ALIGN_OF(type) (__alignof(type))
    2827 #endif
    2828 
    2829 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
    2830  #if defined(_WIN32)
    2831  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    2832  #else
    2833  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
    2834  #endif
    2835 #endif
    2836 
    2837 #ifndef VMA_SYSTEM_FREE
    2838  #if defined(_WIN32)
    2839  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    2840  #else
    2841  #define VMA_SYSTEM_FREE(ptr) free(ptr)
    2842  #endif
    2843 #endif
    2844 
    2845 #ifndef VMA_MIN
    2846  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
    2847 #endif
    2848 
    2849 #ifndef VMA_MAX
    2850  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
    2851 #endif
    2852 
    2853 #ifndef VMA_SWAP
    2854  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
    2855 #endif
    2856 
    2857 #ifndef VMA_SORT
    2858  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
    2859 #endif
    2860 
    2861 #ifndef VMA_DEBUG_LOG
    2862  #define VMA_DEBUG_LOG(format, ...)
    2863  /*
    2864  #define VMA_DEBUG_LOG(format, ...) do { \
    2865  printf(format, __VA_ARGS__); \
    2866  printf("\n"); \
    2867  } while(false)
    2868  */
    2869 #endif
    2870 
    2871 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
    2872 #if VMA_STATS_STRING_ENABLED
    2873  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    2874  {
    2875  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    2876  }
    2877  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    2878  {
    2879  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    2880  }
    2881  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    2882  {
    2883  snprintf(outStr, strLen, "%p", ptr);
    2884  }
    2885 #endif
    2886 
    2887 #ifndef VMA_MUTEX
    2888  class VmaMutex
    2889  {
    2890  public:
    2891  VmaMutex() { }
    2892  ~VmaMutex() { }
    2893  void Lock() { m_Mutex.lock(); }
    2894  void Unlock() { m_Mutex.unlock(); }
    2895  private:
    2896  std::mutex m_Mutex;
    2897  };
    2898  #define VMA_MUTEX VmaMutex
    2899 #endif
    2900 
    2901 /*
    2902 If providing your own implementation, you need to implement a subset of std::atomic:
    2903 
    2904 - Constructor(uint32_t desired)
    2905 - uint32_t load() const
    2906 - void store(uint32_t desired)
    2907 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
    2908 */
    2909 #ifndef VMA_ATOMIC_UINT32
    2910  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
    2911 #endif
    2912 
    2913 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    2914 
    2918  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
    2919 #endif
    2920 
    2921 #ifndef VMA_DEBUG_ALIGNMENT
    2922 
    2926  #define VMA_DEBUG_ALIGNMENT (1)
    2927 #endif
    2928 
    2929 #ifndef VMA_DEBUG_MARGIN
    2930 
    2934  #define VMA_DEBUG_MARGIN (0)
    2935 #endif
    2936 
    2937 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
    2938 
    2942  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
    2943 #endif
    2944 
    2945 #ifndef VMA_DEBUG_DETECT_CORRUPTION
    2946 
    2951  #define VMA_DEBUG_DETECT_CORRUPTION (0)
    2952 #endif
    2953 
    2954 #ifndef VMA_DEBUG_GLOBAL_MUTEX
    2955 
    2959  #define VMA_DEBUG_GLOBAL_MUTEX (0)
    2960 #endif
    2961 
    2962 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    2963 
    2967  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
    2968 #endif
    2969 
    2970 #ifndef VMA_SMALL_HEAP_MAX_SIZE
    2971  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
    2973 #endif
    2974 
    2975 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    2976  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
    2978 #endif
    2979 
    2980 #ifndef VMA_CLASS_NO_COPY
    2981  #define VMA_CLASS_NO_COPY(className) \
    2982  private: \
    2983  className(const className&) = delete; \
    2984  className& operator=(const className&) = delete;
    2985 #endif
    2986 
    2987 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
    2988 
    2989 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
    2990 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
    2991 
    2992 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
    2993 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
    2994 
    2995 /*******************************************************************************
    2996 END OF CONFIGURATION
    2997 */
    2998 
    2999 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    3000  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
    3001 
    3002 // Returns number of bits set to 1 in (v).
    3003 static inline uint32_t VmaCountBitsSet(uint32_t v)
    3004 {
    3005  uint32_t c = v - ((v >> 1) & 0x55555555);
    3006  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    3007  c = ((c >> 4) + c) & 0x0F0F0F0F;
    3008  c = ((c >> 8) + c) & 0x00FF00FF;
    3009  c = ((c >> 16) + c) & 0x0000FFFF;
    3010  return c;
    3011 }
    3012 
    3013 // Aligns given value up to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 16.
    3014 // Use types like uint32_t, uint64_t as T.
    3015 template <typename T>
    3016 static inline T VmaAlignUp(T val, T align)
    3017 {
    3018  return (val + align - 1) / align * align;
    3019 }
    3020 // Aligns given value down to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 8.
    3021 // Use types like uint32_t, uint64_t as T.
    3022 template <typename T>
    3023 static inline T VmaAlignDown(T val, T align)
    3024 {
    3025  return val / align * align;
    3026 }
    3027 
    3028 // Division with mathematical rounding to nearest number.
    3029 template <typename T>
    3030 static inline T VmaRoundDiv(T x, T y)
    3031 {
    3032  return (x + (y / (T)2)) / y;
    3033 }
    3034 
    3035 /*
    3036 Returns true if given number is a power of two.
    3037 T must be unsigned integer number or signed integer but always nonnegative.
    3038 For 0 returns true.
    3039 */
    3040 template <typename T>
    3041 inline bool VmaIsPow2(T x)
    3042 {
    3043  return (x & (x-1)) == 0;
    3044 }
    3045 
    3046 // Returns smallest power of 2 greater or equal to v.
    3047 static inline uint32_t VmaNextPow2(uint32_t v)
    3048 {
    3049  v--;
    3050  v |= v >> 1;
    3051  v |= v >> 2;
    3052  v |= v >> 4;
    3053  v |= v >> 8;
    3054  v |= v >> 16;
    3055  v++;
    3056  return v;
    3057 }
    3058 static inline uint64_t VmaNextPow2(uint64_t v)
    3059 {
    3060  v--;
    3061  v |= v >> 1;
    3062  v |= v >> 2;
    3063  v |= v >> 4;
    3064  v |= v >> 8;
    3065  v |= v >> 16;
    3066  v |= v >> 32;
    3067  v++;
    3068  return v;
    3069 }
    3070 
    3071 // Returns largest power of 2 less or equal to v.
    3072 static inline uint32_t VmaPrevPow2(uint32_t v)
    3073 {
    3074  v |= v >> 1;
    3075  v |= v >> 2;
    3076  v |= v >> 4;
    3077  v |= v >> 8;
    3078  v |= v >> 16;
    3079  v = v ^ (v >> 1);
    3080  return v;
    3081 }
    3082 static inline uint64_t VmaPrevPow2(uint64_t v)
    3083 {
    3084  v |= v >> 1;
    3085  v |= v >> 2;
    3086  v |= v >> 4;
    3087  v |= v >> 8;
    3088  v |= v >> 16;
    3089  v |= v >> 32;
    3090  v = v ^ (v >> 1);
    3091  return v;
    3092 }
    3093 
    3094 static inline bool VmaStrIsEmpty(const char* pStr)
    3095 {
    3096  return pStr == VMA_NULL || *pStr == '\0';
    3097 }
    3098 
    3099 static const char* VmaAlgorithmToStr(uint32_t algorithm)
    3100 {
    3101  switch(algorithm)
    3102  {
    3104  return "Linear";
    3106  return "Buddy";
    3107  case 0:
    3108  return "Default";
    3109  default:
    3110  VMA_ASSERT(0);
    3111  return "";
    3112  }
    3113 }
    3114 
    3115 #ifndef VMA_SORT
    3116 
    3117 template<typename Iterator, typename Compare>
    3118 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
    3119 {
    3120  Iterator centerValue = end; --centerValue;
    3121  Iterator insertIndex = beg;
    3122  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    3123  {
    3124  if(cmp(*memTypeIndex, *centerValue))
    3125  {
    3126  if(insertIndex != memTypeIndex)
    3127  {
    3128  VMA_SWAP(*memTypeIndex, *insertIndex);
    3129  }
    3130  ++insertIndex;
    3131  }
    3132  }
    3133  if(insertIndex != centerValue)
    3134  {
    3135  VMA_SWAP(*insertIndex, *centerValue);
    3136  }
    3137  return insertIndex;
    3138 }
    3139 
    3140 template<typename Iterator, typename Compare>
    3141 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
    3142 {
    3143  if(beg < end)
    3144  {
    3145  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
    3146  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
    3147  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    3148  }
    3149 }
    3150 
    3151 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
    3152 
    3153 #endif // #ifndef VMA_SORT
    3154 
    3155 /*
    3156 Returns true if two memory blocks occupy overlapping pages.
    3157 ResourceA must be in less memory offset than ResourceB.
    3158 
    3159 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
    3160 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
    3161 */
    3162 static inline bool VmaBlocksOnSamePage(
    3163  VkDeviceSize resourceAOffset,
    3164  VkDeviceSize resourceASize,
    3165  VkDeviceSize resourceBOffset,
    3166  VkDeviceSize pageSize)
    3167 {
    3168  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    3169  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    3170  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    3171  VkDeviceSize resourceBStart = resourceBOffset;
    3172  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    3173  return resourceAEndPage == resourceBStartPage;
    3174 }
    3175 
// Internal classification of a suballocation's contents. Used to decide
// whether two neighboring suballocations must respect
// VkPhysicalDeviceLimits::bufferImageGranularity
// (see VmaIsBufferImageGranularityConflict below).
    3176 enum VmaSuballocationType
    3177 {
    3178  VMA_SUBALLOCATION_TYPE_FREE = 0,
    3179  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    3180  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    3181  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    3182  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    3183  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    3184  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
    3185 };
    3186 
    3187 /*
    3188 Returns true if given suballocation types could conflict and must respect
    3189 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
    3190 or linear image and another one is optimal image. If type is unknown, behave
    3191 conservatively.
    3192 */
    3193 static inline bool VmaIsBufferImageGranularityConflict(
    3194  VmaSuballocationType suballocType1,
    3195  VmaSuballocationType suballocType2)
    3196 {
    3197  if(suballocType1 > suballocType2)
    3198  {
    3199  VMA_SWAP(suballocType1, suballocType2);
    3200  }
    3201 
    3202  switch(suballocType1)
    3203  {
    3204  case VMA_SUBALLOCATION_TYPE_FREE:
    3205  return false;
    3206  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
    3207  return true;
    3208  case VMA_SUBALLOCATION_TYPE_BUFFER:
    3209  return
    3210  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    3211  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3212  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
    3213  return
    3214  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    3215  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
    3216  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3217  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
    3218  return
    3219  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3220  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
    3221  return false;
    3222  default:
    3223  VMA_ASSERT(0);
    3224  return true;
    3225  }
    3226 }
    3227 
    3228 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
    3229 {
    3230  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
    3231  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    3232  for(size_t i = 0; i < numberCount; ++i, ++pDst)
    3233  {
    3234  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
    3235  }
    3236 }
    3237 
    3238 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
    3239 {
    3240  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
    3241  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    3242  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
    3243  {
    3244  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
    3245  {
    3246  return false;
    3247  }
    3248  }
    3249  return true;
    3250 }
    3251 
    3252 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
    3253 struct VmaMutexLock
    3254 {
    3255  VMA_CLASS_NO_COPY(VmaMutexLock)
    3256 public:
    3257  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
    3258  m_pMutex(useMutex ? &mutex : VMA_NULL)
    3259  {
    3260  if(m_pMutex)
    3261  {
    3262  m_pMutex->Lock();
    3263  }
    3264  }
    3265 
    3266  ~VmaMutexLock()
    3267  {
    3268  if(m_pMutex)
    3269  {
    3270  m_pMutex->Unlock();
    3271  }
    3272  }
    3273 
    3274 private:
    3275  VMA_MUTEX* m_pMutex;
    3276 };
    3277 
    3278 #if VMA_DEBUG_GLOBAL_MUTEX
    3279  static VMA_MUTEX gDebugGlobalMutex;
    3280  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
    3281 #else
    3282  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
    3283 #endif
    3284 
    3285 // Minimum size of a free suballocation to register it in the free suballocation collection.
    3286 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
    3287 
    3288 /*
    3289 Performs binary search and returns iterator to first element that is greater or
    3290 equal to (key), according to comparison (cmp).
    3291 
    3292 Cmp should return true if first argument is less than second argument.
    3293 
    3294 Returned value is the found element, if present in the collection or place where
    3295 new element with value (key) should be inserted.
    3296 */
    3297 template <typename CmpLess, typename IterT, typename KeyT>
    3298 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
    3299 {
    3300  size_t down = 0, up = (end - beg);
    3301  while(down < up)
    3302  {
    3303  const size_t mid = (down + up) / 2;
    3304  if(cmp(*(beg+mid), key))
    3305  {
    3306  down = mid + 1;
    3307  }
    3308  else
    3309  {
    3310  up = mid;
    3311  }
    3312  }
    3313  return beg + down;
    3314 }
    3315 
    3317 // Memory allocation
    3318 
    3319 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
    3320 {
    3321  if((pAllocationCallbacks != VMA_NULL) &&
    3322  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    3323  {
    3324  return (*pAllocationCallbacks->pfnAllocation)(
    3325  pAllocationCallbacks->pUserData,
    3326  size,
    3327  alignment,
    3328  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    3329  }
    3330  else
    3331  {
    3332  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    3333  }
    3334 }
    3335 
    3336 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
    3337 {
    3338  if((pAllocationCallbacks != VMA_NULL) &&
    3339  (pAllocationCallbacks->pfnFree != VMA_NULL))
    3340  {
    3341  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    3342  }
    3343  else
    3344  {
    3345  VMA_SYSTEM_FREE(ptr);
    3346  }
    3347 }
    3348 
    3349 template<typename T>
    3350 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
    3351 {
    3352  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
    3353 }
    3354 
    3355 template<typename T>
    3356 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
    3357 {
    3358  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
    3359 }
    3360 
    3361 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
    3362 
    3363 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
    3364 
    3365 template<typename T>
    3366 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
    3367 {
    3368  ptr->~T();
    3369  VmaFree(pAllocationCallbacks, ptr);
    3370 }
    3371 
    3372 template<typename T>
    3373 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
    3374 {
    3375  if(ptr != VMA_NULL)
    3376  {
    3377  for(size_t i = count; i--; )
    3378  {
    3379  ptr[i].~T();
    3380  }
    3381  VmaFree(pAllocationCallbacks, ptr);
    3382  }
    3383 }
    3384 
    3385 // STL-compatible allocator.
    3386 template<typename T>
    3387 class VmaStlAllocator
    3388 {
    3389 public:
    3390  const VkAllocationCallbacks* const m_pCallbacks;
    3391  typedef T value_type;
    3392 
    3393  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    3394  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
    3395 
    3396  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    3397  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
    3398 
    3399  template<typename U>
    3400  bool operator==(const VmaStlAllocator<U>& rhs) const
    3401  {
    3402  return m_pCallbacks == rhs.m_pCallbacks;
    3403  }
    3404  template<typename U>
    3405  bool operator!=(const VmaStlAllocator<U>& rhs) const
    3406  {
    3407  return m_pCallbacks != rhs.m_pCallbacks;
    3408  }
    3409 
    3410  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
    3411 };
    3412 
    3413 #if VMA_USE_STL_VECTOR
    3414 
    3415 #define VmaVector std::vector
    3416 
    3417 template<typename T, typename allocatorT>
    3418 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
    3419 {
    3420  vec.insert(vec.begin() + index, item);
    3421 }
    3422 
    3423 template<typename T, typename allocatorT>
    3424 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
    3425 {
    3426  vec.erase(vec.begin() + index);
    3427 }
    3428 
    3429 #else // #if VMA_USE_STL_VECTOR
    3430 
    3431 /* Class with interface compatible with subset of std::vector.
    3432 T must be POD because constructors and destructors are not called and memcpy is
    3433 used for these objects. */
    3434 template<typename T, typename AllocatorT>
    3435 class VmaVector
    3436 {
    3437 public:
    3438  typedef T value_type;
    3439 
    3440  VmaVector(const AllocatorT& allocator) :
    3441  m_Allocator(allocator),
    3442  m_pArray(VMA_NULL),
    3443  m_Count(0),
    3444  m_Capacity(0)
    3445  {
    3446  }
    3447 
    3448  VmaVector(size_t count, const AllocatorT& allocator) :
    3449  m_Allocator(allocator),
    3450  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
    3451  m_Count(count),
    3452  m_Capacity(count)
    3453  {
    3454  }
    3455 
    3456  VmaVector(const VmaVector<T, AllocatorT>& src) :
    3457  m_Allocator(src.m_Allocator),
    3458  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
    3459  m_Count(src.m_Count),
    3460  m_Capacity(src.m_Count)
    3461  {
    3462  if(m_Count != 0)
    3463  {
    3464  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
    3465  }
    3466  }
    3467 
    3468  ~VmaVector()
    3469  {
    3470  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3471  }
    3472 
    3473  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    3474  {
    3475  if(&rhs != this)
    3476  {
    3477  resize(rhs.m_Count);
    3478  if(m_Count != 0)
    3479  {
    3480  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
    3481  }
    3482  }
    3483  return *this;
    3484  }
    3485 
    3486  bool empty() const { return m_Count == 0; }
    3487  size_t size() const { return m_Count; }
    3488  T* data() { return m_pArray; }
    3489  const T* data() const { return m_pArray; }
    3490 
    3491  T& operator[](size_t index)
    3492  {
    3493  VMA_HEAVY_ASSERT(index < m_Count);
    3494  return m_pArray[index];
    3495  }
    3496  const T& operator[](size_t index) const
    3497  {
    3498  VMA_HEAVY_ASSERT(index < m_Count);
    3499  return m_pArray[index];
    3500  }
    3501 
    3502  T& front()
    3503  {
    3504  VMA_HEAVY_ASSERT(m_Count > 0);
    3505  return m_pArray[0];
    3506  }
    3507  const T& front() const
    3508  {
    3509  VMA_HEAVY_ASSERT(m_Count > 0);
    3510  return m_pArray[0];
    3511  }
    3512  T& back()
    3513  {
    3514  VMA_HEAVY_ASSERT(m_Count > 0);
    3515  return m_pArray[m_Count - 1];
    3516  }
    3517  const T& back() const
    3518  {
    3519  VMA_HEAVY_ASSERT(m_Count > 0);
    3520  return m_pArray[m_Count - 1];
    3521  }
    3522 
    3523  void reserve(size_t newCapacity, bool freeMemory = false)
    3524  {
    3525  newCapacity = VMA_MAX(newCapacity, m_Count);
    3526 
    3527  if((newCapacity < m_Capacity) && !freeMemory)
    3528  {
    3529  newCapacity = m_Capacity;
    3530  }
    3531 
    3532  if(newCapacity != m_Capacity)
    3533  {
    3534  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
    3535  if(m_Count != 0)
    3536  {
    3537  memcpy(newArray, m_pArray, m_Count * sizeof(T));
    3538  }
    3539  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3540  m_Capacity = newCapacity;
    3541  m_pArray = newArray;
    3542  }
    3543  }
    3544 
    3545  void resize(size_t newCount, bool freeMemory = false)
    3546  {
    3547  size_t newCapacity = m_Capacity;
    3548  if(newCount > m_Capacity)
    3549  {
    3550  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
    3551  }
    3552  else if(freeMemory)
    3553  {
    3554  newCapacity = newCount;
    3555  }
    3556 
    3557  if(newCapacity != m_Capacity)
    3558  {
    3559  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
    3560  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
    3561  if(elementsToCopy != 0)
    3562  {
    3563  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
    3564  }
    3565  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3566  m_Capacity = newCapacity;
    3567  m_pArray = newArray;
    3568  }
    3569 
    3570  m_Count = newCount;
    3571  }
    3572 
    3573  void clear(bool freeMemory = false)
    3574  {
    3575  resize(0, freeMemory);
    3576  }
    3577 
    3578  void insert(size_t index, const T& src)
    3579  {
    3580  VMA_HEAVY_ASSERT(index <= m_Count);
    3581  const size_t oldCount = size();
    3582  resize(oldCount + 1);
    3583  if(index < oldCount)
    3584  {
    3585  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
    3586  }
    3587  m_pArray[index] = src;
    3588  }
    3589 
    3590  void remove(size_t index)
    3591  {
    3592  VMA_HEAVY_ASSERT(index < m_Count);
    3593  const size_t oldCount = size();
    3594  if(index < oldCount - 1)
    3595  {
    3596  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
    3597  }
    3598  resize(oldCount - 1);
    3599  }
    3600 
    3601  void push_back(const T& src)
    3602  {
    3603  const size_t newIndex = size();
    3604  resize(newIndex + 1);
    3605  m_pArray[newIndex] = src;
    3606  }
    3607 
    3608  void pop_back()
    3609  {
    3610  VMA_HEAVY_ASSERT(m_Count > 0);
    3611  resize(size() - 1);
    3612  }
    3613 
    3614  void push_front(const T& src)
    3615  {
    3616  insert(0, src);
    3617  }
    3618 
    3619  void pop_front()
    3620  {
    3621  VMA_HEAVY_ASSERT(m_Count > 0);
    3622  remove(0);
    3623  }
    3624 
    3625  typedef T* iterator;
    3626 
    3627  iterator begin() { return m_pArray; }
    3628  iterator end() { return m_pArray + m_Count; }
    3629 
    3630 private:
    3631  AllocatorT m_Allocator;
    3632  T* m_pArray;
    3633  size_t m_Count;
    3634  size_t m_Capacity;
    3635 };
    3636 
// Inserts `item` into a VmaVector at position `index` (custom-vector variant).
template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}
    3642 
// Removes the element at position `index` from a VmaVector (custom-vector variant).
template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}
    3648 
    3649 #endif // #if VMA_USE_STL_VECTOR
    3650 
    3651 template<typename CmpLess, typename VectorT>
    3652 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
    3653 {
    3654  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    3655  vector.data(),
    3656  vector.data() + vector.size(),
    3657  value,
    3658  CmpLess()) - vector.data();
    3659  VmaVectorInsert(vector, indexToInsert, value);
    3660  return indexToInsert;
    3661 }
    3662 
    3663 template<typename CmpLess, typename VectorT>
    3664 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
    3665 {
    3666  CmpLess comparator;
    3667  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
    3668  vector.begin(),
    3669  vector.end(),
    3670  value,
    3671  comparator);
    3672  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    3673  {
    3674  size_t indexToRemove = it - vector.begin();
    3675  VmaVectorRemove(vector, indexToRemove);
    3676  return true;
    3677  }
    3678  return false;
    3679 }
    3680 
    3681 template<typename CmpLess, typename IterT, typename KeyT>
    3682 IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
    3683 {
    3684  CmpLess comparator;
    3685  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
    3686  beg, end, value, comparator);
    3687  if(it == end ||
    3688  (!comparator(*it, value) && !comparator(value, *it)))
    3689  {
    3690  return it;
    3691  }
    3692  return end;
    3693 }
    3694 
    3696 // class VmaPoolAllocator
    3697 
/*
Allocator for objects of type T using a list of arrays (pools) to speed up
allocation. Number of elements that can be allocated is not bounded because
allocator can create multiple blocks.
*/
template<typename T>
class VmaPoolAllocator
{
    VMA_CLASS_NO_COPY(VmaPoolAllocator)
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    ~VmaPoolAllocator();
    // Frees all blocks; any pointer previously returned by Alloc() becomes invalid.
    void Clear();
    // Returns a pointer to uninitialized storage for a T (no constructor is run).
    T* Alloc();
    // Returns ptr (previously obtained from Alloc) to its block's free list (no destructor is run).
    void Free(T* ptr);

private:
    // A slot is either a live T or, while free, a link in the block's free list.
    union Item
    {
        uint32_t NextFreeIndex; // Index of the next free slot in the block, or UINT32_MAX at the list end.
        T Value;
    };

    // One fixed-size array of Items plus the head index of its free list.
    struct ItemBlock
    {
        Item* pItems;
        uint32_t FirstFreeIndex; // UINT32_MAX when the block is completely full.
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    size_t m_ItemsPerBlock;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};
    3733 
// Stores the callbacks and block size. No block is allocated up front;
// blocks are created lazily by Alloc() via CreateNewBlock().
template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemsPerBlock(itemsPerBlock),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(itemsPerBlock > 0);
}
    3742 
// Releases all blocks. Items are not individually destroyed (T is expected to be POD here).
template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    Clear();
}
    3748 
    3749 template<typename T>
    3750 void VmaPoolAllocator<T>::Clear()
    3751 {
    3752  for(size_t i = m_ItemBlocks.size(); i--; )
    3753  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    3754  m_ItemBlocks.clear();
    3755 }
    3756 
    3757 template<typename T>
    3758 T* VmaPoolAllocator<T>::Alloc()
    3759 {
    3760  for(size_t i = m_ItemBlocks.size(); i--; )
    3761  {
    3762  ItemBlock& block = m_ItemBlocks[i];
    3763  // This block has some free items: Use first one.
    3764  if(block.FirstFreeIndex != UINT32_MAX)
    3765  {
    3766  Item* const pItem = &block.pItems[block.FirstFreeIndex];
    3767  block.FirstFreeIndex = pItem->NextFreeIndex;
    3768  return &pItem->Value;
    3769  }
    3770  }
    3771 
    3772  // No block has free item: Create new one and use it.
    3773  ItemBlock& newBlock = CreateNewBlock();
    3774  Item* const pItem = &newBlock.pItems[0];
    3775  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    3776  return &pItem->Value;
    3777 }
    3778 
    3779 template<typename T>
    3780 void VmaPoolAllocator<T>::Free(T* ptr)
    3781 {
    3782  // Search all memory blocks to find ptr.
    3783  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    3784  {
    3785  ItemBlock& block = m_ItemBlocks[i];
    3786 
    3787  // Casting to union.
    3788  Item* pItemPtr;
    3789  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
    3790 
    3791  // Check if pItemPtr is in address range of this block.
    3792  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
    3793  {
    3794  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
    3795  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
    3796  block.FirstFreeIndex = index;
    3797  return;
    3798  }
    3799  }
    3800  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
    3801 }
    3802 
// Allocates a new item block, appends it to m_ItemBlocks, threads all of its
// slots into a free list (head at index 0), and returns the stored block.
template<typename T>
typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
{
    ItemBlock newBlock = {
        vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };

    m_ItemBlocks.push_back(newBlock);

    // Setup singly-linked list of all free items in this block.
    // Writing through the local copy is fine: it shares pItems with the
    // copy just pushed into m_ItemBlocks.
    for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
        newBlock.pItems[i].NextFreeIndex = i + 1;
    newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX; // End of free list.
    return m_ItemBlocks.back();
}
    3817 
    3819 // class VmaRawList, VmaList
    3820 
    3821 #if VMA_USE_STL_LIST
    3822 
    3823 #define VmaList std::list
    3824 
    3825 #else // #if VMA_USE_STL_LIST
    3826 
// One node of VmaRawList: doubly-linked, allocated from a VmaPoolAllocator.
template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev; // Null for the first item.
    VmaListItem* pNext; // Null for the last item.
    T Value;
};
    3834 
    3835 // Doubly linked list.
// Doubly linked list operating directly on VmaListItem nodes.
// Nodes come from an internal VmaPoolAllocator; Value fields are not
// constructed or destroyed by the list itself.
template<typename T>
class VmaRawList
{
    VMA_CLASS_NO_COPY(VmaRawList)
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    // Front()/Back() are only meaningful on a non-empty list.
    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    // Push* overloads without a value leave the node's Value uninitialized.
    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator; // Node storage.
    ItemType* m_pFront; // Null when the list is empty.
    ItemType* m_pBack;  // Null when the list is empty.
    size_t m_Count;
};
    3879 
// Creates an empty list. Node pool uses blocks of 128 items.
template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}
    3889 
template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would be unnecessary
    // computations to return all items to m_ItemAllocator as free.
    // The pool allocator's destructor releases all node blocks wholesale.
}
    3896 
    3897 template<typename T>
    3898 void VmaRawList<T>::Clear()
    3899 {
    3900  if(IsEmpty() == false)
    3901  {
    3902  ItemType* pItem = m_pBack;
    3903  while(pItem != VMA_NULL)
    3904  {
    3905  ItemType* const pPrevItem = pItem->pPrev;
    3906  m_ItemAllocator.Free(pItem);
    3907  pItem = pPrevItem;
    3908  }
    3909  m_pFront = VMA_NULL;
    3910  m_pBack = VMA_NULL;
    3911  m_Count = 0;
    3912  }
    3913 }
    3914 
    3915 template<typename T>
    3916 VmaListItem<T>* VmaRawList<T>::PushBack()
    3917 {
    3918  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    3919  pNewItem->pNext = VMA_NULL;
    3920  if(IsEmpty())
    3921  {
    3922  pNewItem->pPrev = VMA_NULL;
    3923  m_pFront = pNewItem;
    3924  m_pBack = pNewItem;
    3925  m_Count = 1;
    3926  }
    3927  else
    3928  {
    3929  pNewItem->pPrev = m_pBack;
    3930  m_pBack->pNext = pNewItem;
    3931  m_pBack = pNewItem;
    3932  ++m_Count;
    3933  }
    3934  return pNewItem;
    3935 }
    3936 
    3937 template<typename T>
    3938 VmaListItem<T>* VmaRawList<T>::PushFront()
    3939 {
    3940  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    3941  pNewItem->pPrev = VMA_NULL;
    3942  if(IsEmpty())
    3943  {
    3944  pNewItem->pNext = VMA_NULL;
    3945  m_pFront = pNewItem;
    3946  m_pBack = pNewItem;
    3947  m_Count = 1;
    3948  }
    3949  else
    3950  {
    3951  pNewItem->pNext = m_pFront;
    3952  m_pFront->pPrev = pNewItem;
    3953  m_pFront = pNewItem;
    3954  ++m_Count;
    3955  }
    3956  return pNewItem;
    3957 }
    3958 
// Appends a new node and copies `value` into it.
template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
{
    ItemType* const pNewItem = PushBack();
    pNewItem->Value = value;
    return pNewItem;
}
    3966 
// Prepends a new node and copies `value` into it.
template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
{
    ItemType* const pNewItem = PushFront();
    pNewItem->Value = value;
    return pNewItem;
}
    3974 
    3975 template<typename T>
    3976 void VmaRawList<T>::PopBack()
    3977 {
    3978  VMA_HEAVY_ASSERT(m_Count > 0);
    3979  ItemType* const pBackItem = m_pBack;
    3980  ItemType* const pPrevItem = pBackItem->pPrev;
    3981  if(pPrevItem != VMA_NULL)
    3982  {
    3983  pPrevItem->pNext = VMA_NULL;
    3984  }
    3985  m_pBack = pPrevItem;
    3986  m_ItemAllocator.Free(pBackItem);
    3987  --m_Count;
    3988 }
    3989 
    3990 template<typename T>
    3991 void VmaRawList<T>::PopFront()
    3992 {
    3993  VMA_HEAVY_ASSERT(m_Count > 0);
    3994  ItemType* const pFrontItem = m_pFront;
    3995  ItemType* const pNextItem = pFrontItem->pNext;
    3996  if(pNextItem != VMA_NULL)
    3997  {
    3998  pNextItem->pPrev = VMA_NULL;
    3999  }
    4000  m_pFront = pNextItem;
    4001  m_ItemAllocator.Free(pFrontItem);
    4002  --m_Count;
    4003 }
    4004 
// Unlinks pItem from the list and returns its node to the pool allocator.
// pItem must be a live node of this list.
template<typename T>
void VmaRawList<T>::Remove(ItemType* pItem)
{
    VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    VMA_HEAVY_ASSERT(m_Count > 0);

    // Fix the predecessor's forward link, or move the head if pItem was first.
    if(pItem->pPrev != VMA_NULL)
    {
        pItem->pPrev->pNext = pItem->pNext;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pFront == pItem);
        m_pFront = pItem->pNext;
    }

    // Fix the successor's backward link, or move the tail if pItem was last.
    if(pItem->pNext != VMA_NULL)
    {
        pItem->pNext->pPrev = pItem->pPrev;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pBack == pItem);
        m_pBack = pItem->pPrev;
    }

    m_ItemAllocator.Free(pItem);
    --m_Count;
}
    4034 
// Inserts a new node (uninitialized Value) immediately before pItem.
// pItem == null means "insert at the end" and delegates to PushBack().
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const prevItem = pItem->pPrev;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pPrev = prevItem;
        newItem->pNext = pItem;
        pItem->pPrev = newItem;
        if(prevItem != VMA_NULL)
        {
            prevItem->pNext = newItem;
        }
        else
        {
            // Inserting before the head: the new node becomes the front.
            VMA_HEAVY_ASSERT(m_pFront == pItem);
            m_pFront = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushBack();
}
    4060 
// Inserts a new node (uninitialized Value) immediately after pItem.
// pItem == null means "insert at the beginning" and delegates to PushFront().
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const nextItem = pItem->pNext;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pNext = nextItem;
        newItem->pPrev = pItem;
        pItem->pNext = newItem;
        if(nextItem != VMA_NULL)
        {
            nextItem->pPrev = newItem;
        }
        else
        {
            // Inserting after the tail: the new node becomes the back.
            VMA_HEAVY_ASSERT(m_pBack == pItem);
            m_pBack = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushFront();
}
    4086 
// Inserts a new node before pItem and copies `value` into it.
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertBefore(pItem);
    newItem->Value = value;
    return newItem;
}
    4094 
// Inserts a new node after pItem and copies `value` into it.
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertAfter(pItem);
    newItem->Value = value;
    return newItem;
}
    4102 
// std::list-like wrapper over VmaRawList, providing bidirectional iterators.
// AllocatorT must expose m_pCallbacks (as VmaStlAllocator does).
// Note: only cbegin()/cend() are provided for const iteration.
template<typename T, typename AllocatorT>
class VmaList
{
    VMA_CLASS_NO_COPY(VmaList)
public:
    // Mutable bidirectional iterator. A null m_pItem represents end().
    class iterator
    {
    public:
        iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        iterator& operator--()
        {
            // Decrementing end() (null item) moves to the last element.
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        iterator operator++(int)
        {
            iterator result = *this;
            ++*this;
            return result;
        }
        iterator operator--(int)
        {
            iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem; // Null represents the past-the-end position.

        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        friend class VmaList<T, AllocatorT>;
    };

    // Read-only bidirectional iterator; implicitly constructible from iterator.
    class const_iterator
    {
    public:
        const_iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        const_iterator(const iterator& src) :
            m_pList(src.m_pList),
            m_pItem(src.m_pItem)
        {
        }

        const T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        const T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        const_iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        const_iterator& operator--()
        {
            // Decrementing cend() (null item) moves to the last element.
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        const_iterator operator++(int)
        {
            const_iterator result = *this;
            ++*this;
            return result;
        }
        const_iterator operator--(int)
        {
            const_iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem; // Null represents the past-the-end position.

        friend class VmaList<T, AllocatorT>;
    };

    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    // Erases the element at `it`; the iterator is invalidated.
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    // Inserts `value` before `it` and returns an iterator to the new element.
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
};
    4287 
    4288 #endif // #if VMA_USE_STL_LIST
    4289 
    4291 // class VmaMap
    4292 
    4293 // Unused in this version.
    4294 #if 0
    4295 
    4296 #if VMA_USE_STL_UNORDERED_MAP
    4297 
    4298 #define VmaPair std::pair
    4299 
    4300 #define VMA_MAP_TYPE(KeyT, ValueT) \
    4301  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
    4302 
    4303 #else // #if VMA_USE_STL_UNORDERED_MAP
    4304 
// Minimal std::pair replacement for VmaMap. Dead code: this whole section is under #if 0.
template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};
    4314 
/* Class compatible with subset of interface of std::unordered_map.
KeyT, ValueT must be POD because they will be stored in VmaVector.
Implemented as a sorted VmaVector with binary search (not a hash table).
Dead code: this whole section is under #if 0. */
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    typedef PairType* iterator; // Raw pointer into the underlying vector.

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    void insert(const PairType& pair);
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    // Kept sorted by key (see VmaPairFirstLess) so find() can binary-search.
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};
    4337 
    4338 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
    4339 
// Orders VmaPair objects by their first member; the second overload allows
// comparing a pair directly against a bare key during binary search.
template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};
    4352 
// Inserts `pair` at the position that keeps m_Vector sorted by key.
// Note: does not check for duplicate keys.
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        pair,
        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    VmaVectorInsert(m_Vector, indexToInsert, pair);
}
    4363 
// Binary-searches for `key`; returns a pointer to the matching pair, or end().
template<typename KeyT, typename ValueT>
VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
{
    // First element whose key is not less than `key`.
    PairType* it = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        key,
        VmaPairFirstLess<KeyT, ValueT>());
    if((it != m_Vector.end()) && (it->first == key))
    {
        return it;
    }
    else
    {
        return m_Vector.end();
    }
}
    4381 
// Removes the pair at `it`; iterators past it are invalidated (vector shift).
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}
    4387 
    4388 #endif // #if VMA_USE_STL_UNORDERED_MAP
    4389 
    4390 #endif // #if 0
    4391 
    4393 
    4394 class VmaDeviceMemoryBlock;
    4395 
    4396 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
    4397 
    4398 struct VmaAllocation_T
    4399 {
    4400  VMA_CLASS_NO_COPY(VmaAllocation_T)
    4401 private:
    4402  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
    4403 
    4404  enum FLAGS
    4405  {
    4406  FLAG_USER_DATA_STRING = 0x01,
    4407  };
    4408 
    4409 public:
    4410  enum ALLOCATION_TYPE
    4411  {
    4412  ALLOCATION_TYPE_NONE,
    4413  ALLOCATION_TYPE_BLOCK,
    4414  ALLOCATION_TYPE_DEDICATED,
    4415  };
    4416 
    // Creates the allocation in ALLOCATION_TYPE_NONE state; one of the Init*
    // methods must be called before it represents real memory.
    // userDataString: when true, sets FLAG_USER_DATA_STRING — presumably making
    // SetUserData() treat pUserData as a string (see IsUserDataString()); confirm
    // against SetUserData's implementation.
    VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
        m_Alignment(1),
        m_Size(0),
        m_pUserData(VMA_NULL),
        m_LastUseFrameIndex(currentFrameIndex),
        m_Type((uint8_t)ALLOCATION_TYPE_NONE),
        m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
        m_MapCount(0),
        m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
    {
#if VMA_STATS_STRING_ENABLED
        m_CreationFrameIndex = currentFrameIndex;
        m_BufferImageUsage = 0;
#endif
    }
    4432 
    ~VmaAllocation_T()
    {
        // All user Map() calls must be balanced by Unmap(); the persistent-map
        // flag bit is excluded from the check.
        VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");

        // Check if owned string was freed.
        VMA_ASSERT(m_pUserData == VMA_NULL);
    }
    4440 
    // Turns this object into a sub-allocation of a larger device memory block.
    // Must be called exactly once, on a freshly constructed (NONE-type) allocation.
    // `mapped` records persistent mapping via MAP_COUNT_FLAG_PERSISTENT_MAP.
    void InitBlockAllocation(
        VmaPool hPool,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset,
        VkDeviceSize alignment,
        VkDeviceSize size,
        VmaSuballocationType suballocationType,
        bool mapped,
        bool canBecomeLost)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(block != VMA_NULL);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_Alignment = alignment;
        m_Size = size;
        m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_SuballocationType = (uint8_t)suballocationType;
        m_BlockAllocation.m_hPool = hPool;
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
        m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    }
    4463 
    // Initializes this object directly in the "lost" state: a block-type
    // allocation with no backing block. m_LastUseFrameIndex must already have
    // been set to VMA_FRAME_INDEX_LOST (asserted below).
    void InitLost()
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
        m_BlockAllocation.m_Block = VMA_NULL;
        m_BlockAllocation.m_Offset = 0;
        m_BlockAllocation.m_CanBecomeLost = true;
    }
    4474 
    4475  void ChangeBlockAllocation(
    4476  VmaAllocator hAllocator,
    4477  VmaDeviceMemoryBlock* block,
    4478  VkDeviceSize offset);
    4479 
    4480  // pMappedData not null means allocation is created with MAPPED flag.
    // Turns this object into an allocation owning its own VkDeviceMemory.
    // Must be called exactly once, on a freshly constructed (NONE-type) allocation.
    // pMappedData not null means allocation is created with MAPPED flag.
    void InitDedicatedAllocation(
        uint32_t memoryTypeIndex,
        VkDeviceMemory hMemory,
        VmaSuballocationType suballocationType,
        void* pMappedData,
        VkDeviceSize size)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(hMemory != VK_NULL_HANDLE);
        m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
        m_Alignment = 0; // Alignment is not tracked for dedicated allocations.
        m_Size = size;
        m_SuballocationType = (uint8_t)suballocationType;
        m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
        m_DedicatedAllocation.m_hMemory = hMemory;
        m_DedicatedAllocation.m_pMappedData = pMappedData;
    }
    4499 
    // Whether this is a block suballocation or a dedicated allocation.
    ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    VkDeviceSize GetAlignment() const { return m_Alignment; }
    VkDeviceSize GetSize() const { return m_Size; }
    bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
    void* GetUserData() const { return m_pUserData; }
    void SetUserData(VmaAllocator hAllocator, void* pUserData);
    VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }

    // Valid only for block allocations - asserts otherwise.
    VmaDeviceMemoryBlock* GetBlock() const
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        return m_BlockAllocation.m_Block;
    }
    VkDeviceSize GetOffset() const;
    VkDeviceMemory GetMemory() const;
    uint32_t GetMemoryTypeIndex() const;
    bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
    void* GetMappedData() const;
    bool CanBecomeLost() const;
    VmaPool GetPool() const;

    uint32_t GetLastUseFrameIndex() const
    {
        return m_LastUseFrameIndex.load();
    }
    // Atomically updates the last-use frame index. Uses compare_exchange_weak,
    // which may fail spuriously, so callers are expected to retry in a loop.
    bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    {
        return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    }
    /*
    - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
      makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    - Else, returns false.

    If hAllocation is already lost, assert - you should not call it then.
    If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    */
    bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
    // Fills outInfo with statistics describing this single dedicated allocation.
    // Valid only for dedicated allocations - asserts otherwise.
    void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
        outInfo.blockCount = 1;
        outInfo.allocationCount = 1;
        outInfo.unusedRangeCount = 0;
        outInfo.usedBytes = m_Size;
        outInfo.unusedBytes = 0;
        outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
        // There are no unused ranges, so min starts at the neutral element for
        // min-merging and max at the neutral element for max-merging.
        outInfo.unusedRangeSizeMin = UINT64_MAX;
        outInfo.unusedRangeSizeMax = 0;
    }
    4551 
    // Map/unmap entry points, dispatched by allocation type
    // (block suballocation vs. dedicated memory).
    void BlockAllocMap();
    void BlockAllocUnmap();
    VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    void DedicatedAllocUnmap(VmaAllocator hAllocator);

#if VMA_STATS_STRING_ENABLED
    uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
    uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }

    // Records the usage flags for statistics. May be set at most once
    // (asserts that no usage was recorded before).
    void InitBufferImageUsage(uint32_t bufferImageUsage)
    {
        VMA_ASSERT(m_BufferImageUsage == 0);
        m_BufferImageUsage = bufferImageUsage;
    }

    void PrintParameters(class VmaJsonWriter& json) const;
#endif
    4569 
private:
    VkDeviceSize m_Alignment;
    VkDeviceSize m_Size;
    void* m_pUserData;
    // Frame index of last use; VMA_FRAME_INDEX_LOST marks a lost allocation.
    VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    uint8_t m_Type; // ALLOCATION_TYPE
    uint8_t m_SuballocationType; // VmaSuballocationType
    // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
    uint8_t m_MapCount;
    uint8_t m_Flags; // enum FLAGS

    // Allocation out of VmaDeviceMemoryBlock.
    struct BlockAllocation
    {
        VmaPool m_hPool; // Null if belongs to general memory.
        VmaDeviceMemoryBlock* m_Block;
        VkDeviceSize m_Offset;
        bool m_CanBecomeLost;
    };

    // Allocation for an object that has its own private VkDeviceMemory.
    struct DedicatedAllocation
    {
        uint32_t m_MemoryTypeIndex;
        VkDeviceMemory m_hMemory;
        void* m_pMappedData; // Not null means memory is mapped.
    };

    // Exactly one member is active at a time, selected by m_Type.
    union
    {
        // Allocation out of VmaDeviceMemoryBlock.
        BlockAllocation m_BlockAllocation;
        // Allocation for an object that has its own private VkDeviceMemory.
        DedicatedAllocation m_DedicatedAllocation;
    };

#if VMA_STATS_STRING_ENABLED
    uint32_t m_CreationFrameIndex;
    uint32_t m_BufferImageUsage; // 0 if unknown.
#endif

    void FreeUserDataString(VmaAllocator hAllocator);
};
    4614 
/*
Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
allocated memory block or free.
*/
struct VmaSuballocation
{
    VkDeviceSize offset;
    VkDeviceSize size;
    VmaAllocation hAllocation; // Null for free regions.
    VmaSuballocationType type;
};
    4626 
// Comparator for offsets.
// Orders suballocations by ascending offset.
struct VmaSuballocationOffsetLess
{
    bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    {
        return lhs.offset < rhs.offset;
    }
};
// Orders suballocations by descending offset.
struct VmaSuballocationOffsetGreater
{
    bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    {
        return lhs.offset > rhs.offset;
    }
};
    4642 
// List of suballocations making up a single device memory block.
typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;

// Cost of one additional allocation lost, as equivalent in bytes.
static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
    4647 
/*
Parameters of planned allocation inside a VmaDeviceMemoryBlock.

If canMakeOtherLost was false:
- item points to a FREE suballocation.
- itemsToMakeLostCount is 0.

If canMakeOtherLost was true:
- item points to first of sequence of suballocations, which are either FREE,
  or point to VmaAllocations that can become lost.
- itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
  the requested allocation to succeed.
*/
struct VmaAllocationRequest
{
    VkDeviceSize offset;
    VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
    VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
    VmaSuballocationList::iterator item;
    size_t itemsToMakeLostCount;
    void* customData; // Implementation-specific data attached to the request.

    // Heuristic cost of fulfilling this request: bytes of allocations sacrificed
    // plus a fixed per-lost-allocation penalty. Lower is better when comparing
    // candidate requests.
    VkDeviceSize CalcCost() const
    {
        return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    }
};
    4675 
/*
Data structure used for bookkeeping of allocations and unused ranges of memory
in a single VkDeviceMemory block.

Abstract base class; concrete algorithms are implemented by
VmaBlockMetadata_Generic, VmaBlockMetadata_Linear, and VmaBlockMetadata_Buddy.
*/
class VmaBlockMetadata
{
public:
    VmaBlockMetadata(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata() { }
    virtual void Init(VkDeviceSize size) { m_Size = size; }

    // Validates all data structures inside this object. If not valid, returns false.
    virtual bool Validate() const = 0;
    VkDeviceSize GetSize() const { return m_Size; }
    virtual size_t GetAllocationCount() const = 0;
    virtual VkDeviceSize GetSumFreeSize() const = 0;
    virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
    // Returns true if this block is empty - contains only single free suballocation.
    virtual bool IsEmpty() const = 0;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
    // Shouldn't modify blockCount.
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
#endif

    // Tries to find a place for suballocation with given parameters inside this block.
    // If succeeded, fills pAllocationRequest and returns true.
    // If failed, returns false.
    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy, // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* flags.
        VmaAllocationRequest* pAllocationRequest) = 0;

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest) = 0;

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;

    virtual VkResult CheckCorruption(const void* pBlockData) = 0;

    // Makes actual allocation based on request. Request must already be checked and valid.
    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation) = 0;

    // Frees suballocation assigned to given memory region.
    virtual void Free(const VmaAllocation allocation) = 0;
    virtual void FreeAtOffset(VkDeviceSize offset) = 0;

protected:
    const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }

#if VMA_STATS_STRING_ENABLED
    // Helpers for derived classes' PrintDetailedMap() implementations.
    void PrintDetailedMap_Begin(class VmaJsonWriter& json,
        VkDeviceSize unusedBytes,
        size_t allocationCount,
        size_t unusedRangeCount) const;
    void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VmaAllocation hAllocation) const;
    void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VkDeviceSize size) const;
    void PrintDetailedMap_End(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Size;
    const VkAllocationCallbacks* m_pAllocationCallbacks;
};
    4761 
// Asserts and returns false from the enclosing function when cond is not met.
// Intended for use inside Validate() implementations.
#define VMA_VALIDATE(cond) do { if(!(cond)) { \
        VMA_ASSERT(0 && "Validation failed: " #cond); \
        return false; \
    } } while(false)
    4766 
// General-purpose implementation of VmaBlockMetadata, based on a list of all
// suballocations plus a size-sorted vector of iterators to the free ones.
class VmaBlockMetadata_Generic : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
public:
    VmaBlockMetadata_Generic(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Generic();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

private:
    uint32_t m_FreeCount;
    VkDeviceSize m_SumFreeSize;
    // All suballocations, both free and used (GetAllocationCount() = size() - m_FreeCount).
    VmaSuballocationList m_Suballocations;
    // Suballocations that are free and have size greater than certain threshold.
    // Sorted by size, ascending.
    VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;

    bool ValidateFreeSuballocationList() const;

    // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
    // If yes, fills pOffset and returns true. If no, returns false.
    bool CheckAllocation(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        VmaSuballocationList::const_iterator suballocItem,
        bool canMakeOtherLost,
        VkDeviceSize* pOffset,
        size_t* itemsToMakeLostCount,
        VkDeviceSize* pSumFreeSize,
        VkDeviceSize* pSumItemSize) const;
    // Given free suballocation, it merges it with following one, which must also be free.
    void MergeFreeWithNext(VmaSuballocationList::iterator item);
    // Releases given suballocation, making it free.
    // Merges it with adjacent free suballocations if applicable.
    // Returns iterator to new free suballocation at this place.
    VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    // Given free suballocation, it inserts it into sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    // Given free suballocation, it removes it from sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
};
    4857 
    4858 /*
    4859 Allocations and their references in internal data structure look like this:
    4860 
    4861 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
    4862 
    4863  0 +-------+
    4864  | |
    4865  | |
    4866  | |
    4867  +-------+
    4868  | Alloc | 1st[m_1stNullItemsBeginCount]
    4869  +-------+
    4870  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4871  +-------+
    4872  | ... |
    4873  +-------+
    4874  | Alloc | 1st[1st.size() - 1]
    4875  +-------+
    4876  | |
    4877  | |
    4878  | |
    4879 GetSize() +-------+
    4880 
    4881 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
    4882 
    4883  0 +-------+
    4884  | Alloc | 2nd[0]
    4885  +-------+
    4886  | Alloc | 2nd[1]
    4887  +-------+
    4888  | ... |
    4889  +-------+
    4890  | Alloc | 2nd[2nd.size() - 1]
    4891  +-------+
    4892  | |
    4893  | |
    4894  | |
    4895  +-------+
    4896  | Alloc | 1st[m_1stNullItemsBeginCount]
    4897  +-------+
    4898  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4899  +-------+
    4900  | ... |
    4901  +-------+
    4902  | Alloc | 1st[1st.size() - 1]
    4903  +-------+
    4904  | |
    4905 GetSize() +-------+
    4906 
    4907 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
    4908 
    4909  0 +-------+
    4910  | |
    4911  | |
    4912  | |
    4913  +-------+
    4914  | Alloc | 1st[m_1stNullItemsBeginCount]
    4915  +-------+
    4916  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4917  +-------+
    4918  | ... |
    4919  +-------+
    4920  | Alloc | 1st[1st.size() - 1]
    4921  +-------+
    4922  | |
    4923  | |
    4924  | |
    4925  +-------+
    4926  | Alloc | 2nd[2nd.size() - 1]
    4927  +-------+
    4928  | ... |
    4929  +-------+
    4930  | Alloc | 2nd[1]
    4931  +-------+
    4932  | Alloc | 2nd[0]
    4933 GetSize() +-------+
    4934 
    4935 */
// Metadata implementation for linear allocation: a stack, ring buffer, or
// double stack, depending on SECOND_VECTOR_MODE (see the diagrams in the
// comment above).
class VmaBlockMetadata_Linear : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
public:
    VmaBlockMetadata_Linear(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Linear();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const;
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const { return GetAllocationCount() == 0; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

private:
    /*
    There are two suballocation vectors, used in ping-pong way.
    The one with index m_1stVectorIndex is called 1st.
    The one with index (m_1stVectorIndex ^ 1) is called 2nd.
    2nd can be non-empty only when 1st is not empty.
    When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
    */
    typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;

    enum SECOND_VECTOR_MODE
    {
        SECOND_VECTOR_EMPTY,
        /*
        Suballocations in 2nd vector are created later than the ones in 1st, but they
        all have smaller offset.
        */
        SECOND_VECTOR_RING_BUFFER,
        /*
        Suballocations in 2nd vector are upper side of double stack.
        They all have offsets higher than those in 1st vector.
        Top of this stack means smaller offsets, but higher indices in this vector.
        */
        SECOND_VECTOR_DOUBLE_STACK,
    };

    VkDeviceSize m_SumFreeSize;
    SuballocationVectorType m_Suballocations0, m_Suballocations1;
    uint32_t m_1stVectorIndex;
    SECOND_VECTOR_MODE m_2ndVectorMode;

    SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
    const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }

    // Number of items in 1st vector with hAllocation = null at the beginning.
    size_t m_1stNullItemsBeginCount;
    // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
    size_t m_1stNullItemsMiddleCount;
    // Number of items in 2nd vector with hAllocation = null.
    size_t m_2ndNullItemsCount;

    bool ShouldCompact1st() const;
    void CleanupAfterFree();
};
    5034 
    5035 /*
    5036 - GetSize() is the original size of allocated memory block.
    5037 - m_UsableSize is this size aligned down to a power of two.
    5038  All allocations and calculations happen relative to m_UsableSize.
    5039 - GetUnusableSize() is the difference between them.
     5040  It is reported as separate, unused range, not available for allocations.
    5041 
    5042 Node at level 0 has size = m_UsableSize.
    5043 Each next level contains nodes with size 2 times smaller than current level.
    5044 m_LevelCount is the maximum number of levels to use in the current object.
    5045 */
// Buddy-system implementation of VmaBlockMetadata: a binary tree of nodes
// whose sizes halve at each level (see the explanatory comment above).
class VmaBlockMetadata_Buddy : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
public:
    VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Buddy();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const { return m_AllocationCount; }
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Corruption detection is not supported by this algorithm.
    virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
    virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }

private:
    static const VkDeviceSize MIN_NODE_SIZE = 32;
    static const size_t MAX_LEVELS = 30;

    // Accumulators filled while walking the tree in Validate().
    struct ValidationContext
    {
        size_t calculatedAllocationCount;
        size_t calculatedFreeCount;
        VkDeviceSize calculatedSumFreeSize;

        ValidationContext() :
            calculatedAllocationCount(0),
            calculatedFreeCount(0),
            calculatedSumFreeSize(0) { }
    };

    // Node of the buddy tree. Payload depends on type (union below).
    struct Node
    {
        VkDeviceSize offset;
        enum TYPE
        {
            TYPE_FREE,
            TYPE_ALLOCATION,
            TYPE_SPLIT,
            TYPE_COUNT
        } type;
        Node* parent;
        Node* buddy;

        union
        {
            struct
            {
                Node* prev;
                Node* next;
            } free;
            struct
            {
                VmaAllocation alloc;
            } allocation;
            struct
            {
                Node* leftChild;
            } split;
        };
    };

    // Size of the memory block aligned down to a power of two.
    VkDeviceSize m_UsableSize;
    uint32_t m_LevelCount;

    Node* m_Root;
    // Per-level doubly-linked list of free nodes.
    struct {
        Node* front;
        Node* back;
    } m_FreeList[MAX_LEVELS];
    // Number of nodes in the tree with type == TYPE_ALLOCATION.
    size_t m_AllocationCount;
    // Number of nodes in the tree with type == TYPE_FREE.
    size_t m_FreeCount;
    // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
    VkDeviceSize m_SumFreeSize;

    VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
    void DeleteNode(Node* node);
    bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
    uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
    inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
    // Alloc passed just for validation. Can be null.
    void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
    void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
    // Adds node to the front of FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next can be undefined.
    void AddToFreeListFront(uint32_t level, Node* node);
    // Removes node from FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next stay untouched.
    void RemoveFromFreeList(uint32_t level, Node* node);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
#endif
};
    5182 
/*
Represents a single block of device memory (`VkDeviceMemory`) with all the
data about its regions (aka suballocations, #VmaAllocation), assigned and free.

Thread-safety: This class must be externally synchronized.
*/
class VmaDeviceMemoryBlock
{
    VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
public:
    // Bookkeeping of suballocations inside this block.
    VmaBlockMetadata* m_pMetadata;

    VmaDeviceMemoryBlock(VmaAllocator hAllocator);

    ~VmaDeviceMemoryBlock()
    {
        VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
        VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    }

    // Always call after construction.
    void Init(
        VmaAllocator hAllocator,
        uint32_t newMemoryTypeIndex,
        VkDeviceMemory newMemory,
        VkDeviceSize newSize,
        uint32_t id,
        uint32_t algorithm);
    // Always call before destruction.
    void Destroy(VmaAllocator allocator);

    VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    uint32_t GetId() const { return m_Id; }
    void* GetMappedData() const { return m_pMappedData; }

    // Validates all data structures inside this object. If not valid, returns false.
    bool Validate() const;

    VkResult CheckCorruption(VmaAllocator hAllocator);

    // ppData can be null.
    // NOTE(review): `count` appears to be the number of map references to
    // add/release against m_MapCount - confirm against the definitions.
    VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
    void Unmap(VmaAllocator hAllocator, uint32_t count);

    VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
    VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);

    VkResult BindBufferMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkBuffer hBuffer);
    VkResult BindImageMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkImage hImage);

private:
    uint32_t m_MemoryTypeIndex;
    uint32_t m_Id;
    VkDeviceMemory m_hMemory;

    // Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
    // Also protects m_MapCount, m_pMappedData.
    VMA_MUTEX m_Mutex;
    uint32_t m_MapCount;
    void* m_pMappedData;
};
    5251 
    5252 struct VmaPointerLess
    5253 {
    5254  bool operator()(const void* lhs, const void* rhs) const
    5255  {
    5256  return lhs < rhs;
    5257  }
    5258 };
    5259 
    5260 class VmaDefragmentator;
    5261 
    5262 /*
    5263 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
    5264 Vulkan memory type.
    5265 
    5266 Synchronized internally with a mutex.
    5267 */
struct VmaBlockVector
{
    VMA_CLASS_NO_COPY(VmaBlockVector)
public:
    VmaBlockVector(
        VmaAllocator hAllocator,
        uint32_t memoryTypeIndex,
        VkDeviceSize preferredBlockSize,
        size_t minBlockCount,
        size_t maxBlockCount,
        VkDeviceSize bufferImageGranularity,
        uint32_t frameInUseCount,
        bool isCustomPool,
        bool explicitBlockSize,
        uint32_t algorithm);
    ~VmaBlockVector();

    // Creates blocks up front so that at least m_MinBlockCount blocks exist.
    VkResult CreateMinBlocks();

    // Accessors for the immutable configuration of this vector.
    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
    uint32_t GetAlgorithm() const { return m_Algorithm; }

    void GetPoolStats(VmaPoolStats* pStats);

    bool IsEmpty() const { return m_Blocks.empty(); }
    bool IsCorruptionDetectionEnabled() const;

    // Allocates a region of `size` bytes with given alignment; on success
    // returns the new allocation via *pAllocation.
    VkResult Allocate(
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    void Free(
        VmaAllocation hAllocation);

    // Adds statistics of this BlockVector to pStats.
    void AddStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    void MakePoolAllocationsLost(
        uint32_t currentFrameIndex,
        size_t* pLostAllocationCount);
    VkResult CheckCorruption();

    // Returns the defragmentator for this vector, creating it on first call.
    VmaDefragmentator* EnsureDefragmentator(
        VmaAllocator hAllocator,
        uint32_t currentFrameIndex);

    VkResult Defragment(
        VmaDefragmentationStats* pDefragmentationStats,
        VkDeviceSize& maxBytesToMove,
        uint32_t& maxAllocationsToMove);

    void DestroyDefragmentator();

private:
    friend class VmaDefragmentator;

    // Immutable configuration captured at construction.
    const VmaAllocator m_hAllocator;
    const uint32_t m_MemoryTypeIndex;
    const VkDeviceSize m_PreferredBlockSize;
    const size_t m_MinBlockCount;
    const size_t m_MaxBlockCount;
    const VkDeviceSize m_BufferImageGranularity;
    const uint32_t m_FrameInUseCount;
    const bool m_IsCustomPool;
    const bool m_ExplicitBlockSize;
    const uint32_t m_Algorithm;
    /* There can be at most one allocation that is completely empty - a
    hysteresis to avoid pessimistic case of alternating creation and destruction
    of a VkDeviceMemory. */
    bool m_HasEmptyBlock;
    // Guards mutable state (see class-level comment: synchronized internally).
    VMA_MUTEX m_Mutex;
    // Incrementally sorted by sumFreeSize, ascending.
    VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
    VmaDefragmentator* m_pDefragmentator;
    uint32_t m_NextBlockId;

    VkDeviceSize CalcMaxBlockSize() const;

    // Finds and removes given block from vector.
    void Remove(VmaDeviceMemoryBlock* pBlock);

    // Performs single step in sorting m_Blocks. They may not be fully sorted
    // after this call.
    void IncrementallySortBlocks();

    // To be used only without CAN_MAKE_OTHER_LOST flag.
    VkResult AllocateFromBlock(
        VmaDeviceMemoryBlock* pBlock,
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        VmaAllocationCreateFlags allocFlags,
        void* pUserData,
        VmaSuballocationType suballocType,
        uint32_t strategy,
        VmaAllocation* pAllocation);

    VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
};
    5380 
// Custom memory pool: a thin wrapper around one VmaBlockVector plus an id.
struct VmaPool_T
{
    VMA_CLASS_NO_COPY(VmaPool_T)
public:
    // Block sequence that backs all allocations made from this pool.
    VmaBlockVector m_BlockVector;

    VmaPool_T(
        VmaAllocator hAllocator,
        const VmaPoolCreateInfo& createInfo,
        VkDeviceSize preferredBlockSize);
    ~VmaPool_T();

    uint32_t GetId() const { return m_Id; }
    // Id starts at 0 and may be assigned exactly once (asserted).
    void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }

#if VMA_STATS_STRING_ENABLED
    //void PrintDetailedMap(class VmaStringBuilder& sb);
#endif

private:
    uint32_t m_Id;
};
    5403 
// Compacts allocations within a single VmaBlockVector by moving them between
// blocks, within the byte/count budgets passed to Defragment().
class VmaDefragmentator
{
    VMA_CLASS_NO_COPY(VmaDefragmentator)
private:
    const VmaAllocator m_hAllocator;
    // The block vector whose allocations are being compacted.
    VmaBlockVector* const m_pBlockVector;
    uint32_t m_CurrentFrameIndex;
    // Running totals exposed via GetBytesMoved() / GetAllocationsMoved().
    VkDeviceSize m_BytesMoved;
    uint32_t m_AllocationsMoved;

    // One allocation registered via AddAllocation(), together with the
    // caller-provided optional out-flag (m_pChanged).
    struct AllocationInfo
    {
        VmaAllocation m_hAllocation;
        VkBool32* m_pChanged;

        AllocationInfo() :
            m_hAllocation(VK_NULL_HANDLE),
            m_pChanged(VMA_NULL)
        {
        }
    };

    // Orders AllocationInfo by allocation size, largest first.
    struct AllocationInfoSizeGreater
    {
        bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
        {
            return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
        }
    };

    // Used between AddAllocation and Defragment.
    VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

    // Per-block bookkeeping used during defragmentation.
    struct BlockInfo
    {
        VmaDeviceMemoryBlock* m_pBlock;
        bool m_HasNonMovableAllocations;
        VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

        BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
            m_pBlock(VMA_NULL),
            // Pessimistic default; recomputed by CalcHasNonMovableAllocations().
            m_HasNonMovableAllocations(true),
            m_Allocations(pAllocationCallbacks),
            m_pMappedDataForDefragmentation(VMA_NULL)
        {
        }

        // A block has non-movable allocations when it contains more
        // allocations than were registered for defragmentation.
        void CalcHasNonMovableAllocations()
        {
            const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
            const size_t defragmentAllocCount = m_Allocations.size();
            m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
        }

        // NOTE: misspelling ("Descecnding") is kept for interface stability -
        // callers elsewhere in this file use this exact name.
        void SortAllocationsBySizeDescecnding()
        {
            VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
        }

        VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
        void Unmap(VmaAllocator hAllocator);

    private:
        // Not null if mapped for defragmentation only, not originally mapped.
        void* m_pMappedDataForDefragmentation;
    };

    // Heterogeneous comparator: orders BlockInfo* by the block pointer they
    // wrap, and also supports direct comparison against a raw block pointer.
    struct BlockPointerLess
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlock;
        }
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
        }
    };

    // 1. Blocks with some non-movable allocations go first.
    // 2. Blocks with smaller sumFreeSize go first.
    struct BlockInfoCompareMoveDestination
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return true;
            }
            if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return false;
            }
            if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
            {
                return true;
            }
            return false;
        }
    };

    typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
    BlockInfoVector m_Blocks;

    // Performs one pass of moves within the given budgets.
    VkResult DefragmentRound(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);

    static bool MoveMakesSense(
        size_t dstBlockIndex, VkDeviceSize dstOffset,
        size_t srcBlockIndex, VkDeviceSize srcOffset);

public:
    VmaDefragmentator(
        VmaAllocator hAllocator,
        VmaBlockVector* pBlockVector,
        uint32_t currentFrameIndex);

    ~VmaDefragmentator();

    VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }

    // Registers an allocation as a candidate for moving by Defragment().
    void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);

    VkResult Defragment(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);
};
    5533 
    5534 #if VMA_RECORDING_ENABLED
    5535 
// Records a trace of VMA API calls to a file when VMA_RECORDING_ENABLED.
// Each public Record* method corresponds to one traced VMA entry point.
class VmaRecorder
{
public:
    VmaRecorder();
    VkResult Init(const VmaRecordSettings& settings, bool useMutex);
    // Writes a header describing the device and memory configuration.
    void WriteConfiguration(
        const VkPhysicalDeviceProperties& devProps,
        const VkPhysicalDeviceMemoryProperties& memProps,
        bool dedicatedAllocationExtensionEnabled);
    ~VmaRecorder();

    void RecordCreateAllocator(uint32_t frameIndex);
    void RecordDestroyAllocator(uint32_t frameIndex);
    void RecordCreatePool(uint32_t frameIndex,
        const VmaPoolCreateInfo& createInfo,
        VmaPool pool);
    void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
    void RecordAllocateMemory(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordAllocateMemoryForImage(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordFreeMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordSetAllocationUserData(uint32_t frameIndex,
        VmaAllocation allocation,
        const void* pUserData);
    void RecordCreateLostAllocation(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordMapMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordUnmapMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordFlushAllocation(uint32_t frameIndex,
        VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    void RecordInvalidateAllocation(uint32_t frameIndex,
        VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    void RecordCreateBuffer(uint32_t frameIndex,
        const VkBufferCreateInfo& bufCreateInfo,
        const VmaAllocationCreateInfo& allocCreateInfo,
        VmaAllocation allocation);
    void RecordCreateImage(uint32_t frameIndex,
        const VkImageCreateInfo& imageCreateInfo,
        const VmaAllocationCreateInfo& allocCreateInfo,
        VmaAllocation allocation);
    void RecordDestroyBuffer(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordDestroyImage(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordTouchAllocation(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordGetAllocationInfo(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordMakePoolAllocationsLost(uint32_t frameIndex,
        VmaPool pool);

private:
    // Per-call context captured by GetBasicParams().
    struct CallParams
    {
        uint32_t threadId;
        double time;
    };

    // Renders user data for output; presumably either the user string itself
    // or the pointer value formatted into m_PtrStr - confirm in implementation.
    class UserDataString
    {
    public:
        UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
        const char* GetString() const { return m_Str; }

    private:
        char m_PtrStr[17];
        const char* m_Str;
    };

    bool m_UseMutex;
    VmaRecordFlags m_Flags;
    FILE* m_File;
    // Serializes writes to m_File when m_UseMutex is set.
    VMA_MUTEX m_FileMutex;
    // NOTE(review): assumed performance-counter frequency and start value used
    // to compute CallParams::time - confirm in GetBasicParams().
    int64_t m_Freq;
    int64_t m_StartCounter;

    void GetBasicParams(CallParams& outParams);
    void Flush();
};
    5631 
    5632 #endif // #if VMA_RECORDING_ENABLED
    5633 
    5634 // Main allocator object.
struct VmaAllocator_T
{
    VMA_CLASS_NO_COPY(VmaAllocator_T)
public:
    bool m_UseMutex;
    bool m_UseKhrDedicatedAllocation;
    VkDevice m_hDevice;
    bool m_AllocationCallbacksSpecified;
    VkAllocationCallbacks m_AllocationCallbacks;
    VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;

    // Number of bytes free out of limit, or VK_WHOLE_SIZE if not limit for that heap.
    VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
    VMA_MUTEX m_HeapSizeLimitMutex;

    VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
    VkPhysicalDeviceMemoryProperties m_MemProps;

    // Default pools.
    VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];

    // Each vector is sorted by memory (handle value).
    typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
    AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
    VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];

    VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
    VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
    ~VmaAllocator_T();

    // Returns user-specified callbacks, or null to use the default allocator.
    const VkAllocationCallbacks* GetAllocationCallbacks() const
    {
        return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
    }
    const VmaVulkanFunctions& GetVulkanFunctions() const
    {
        return m_VulkanFunctions;
    }

    // Device granularity, clamped up by the debug minimum.
    VkDeviceSize GetBufferImageGranularity() const
    {
        return VMA_MAX(
            static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
            m_PhysicalDeviceProperties.limits.bufferImageGranularity);
    }

    uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
    uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }

    uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
    {
        VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
        return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
    }
    // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
    bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
    {
        return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
            VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    }
    // Minimum alignment for all allocations in specific memory type.
    // Non-coherent types must also respect nonCoherentAtomSize.
    VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
    {
        return IsMemoryTypeNonCoherent(memTypeIndex) ?
            VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
            (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
    }

    bool IsIntegratedGpu() const
    {
        return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
    }

#if VMA_RECORDING_ENABLED
    VmaRecorder* GetRecorder() const { return m_pRecorder; }
#endif

    void GetBufferMemoryRequirements(
        VkBuffer hBuffer,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;
    void GetImageMemoryRequirements(
        VkImage hImage,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;

    // Main allocation function.
    VkResult AllocateMemory(
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // Main deallocation function.
    void FreeMemory(const VmaAllocation allocation);

    void CalculateStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    VkResult Defragment(
        VmaAllocation* pAllocations,
        size_t allocationCount,
        VkBool32* pAllocationsChanged,
        const VmaDefragmentationInfo* pDefragmentationInfo,
        VmaDefragmentationStats* pDefragmentationStats);

    void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
    bool TouchAllocation(VmaAllocation hAllocation);

    VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
    void DestroyPool(VmaPool pool);
    void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);

    void SetCurrentFrameIndex(uint32_t frameIndex);
    uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }

    void MakePoolAllocationsLost(
        VmaPool hPool,
        size_t* pLostAllocationCount);
    VkResult CheckPoolCorruption(VmaPool hPool);
    VkResult CheckCorruption(uint32_t memoryTypeBits);

    void CreateLostAllocation(VmaAllocation* pAllocation);

    VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
    void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);

    VkResult Map(VmaAllocation hAllocation, void** ppData);
    void Unmap(VmaAllocation hAllocation);

    VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
    VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);

    void FlushOrInvalidateAllocation(
        VmaAllocation hAllocation,
        VkDeviceSize offset, VkDeviceSize size,
        VMA_CACHE_OPERATION op);

    void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);

private:
    VkDeviceSize m_PreferredLargeHeapBlockSize;

    VkPhysicalDevice m_PhysicalDevice;
    VMA_ATOMIC_UINT32 m_CurrentFrameIndex;

    VMA_MUTEX m_PoolsMutex;
    // Protected by m_PoolsMutex. Sorted by pointer value.
    VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
    uint32_t m_NextPoolId;

    VmaVulkanFunctions m_VulkanFunctions;

#if VMA_RECORDING_ENABLED
    VmaRecorder* m_pRecorder;
#endif

    void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);

    VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);

    VkResult AllocateMemoryOfType(
        VkDeviceSize size,
        VkDeviceSize alignment,
        bool dedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        uint32_t memTypeIndex,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // Allocates and registers new VkDeviceMemory specifically for single allocation.
    VkResult AllocateDedicatedMemory(
        VkDeviceSize size,
        VmaSuballocationType suballocType,
        uint32_t memTypeIndex,
        bool map,
        bool isUserDataString,
        void* pUserData,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        VmaAllocation* pAllocation);

    // Tries to free pMemory as Dedicated Memory. Returns true if found and freed.
    void FreeDedicatedMemory(VmaAllocation allocation);
};
    5831 
    5833 // Memory allocation #2 after VmaAllocator_T definition
    5834 
    5835 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
    5836 {
    5837  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
    5838 }
    5839 
    5840 static void VmaFree(VmaAllocator hAllocator, void* ptr)
    5841 {
    5842  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
    5843 }
    5844 
    5845 template<typename T>
    5846 static T* VmaAllocate(VmaAllocator hAllocator)
    5847 {
    5848  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
    5849 }
    5850 
    5851 template<typename T>
    5852 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
    5853 {
    5854  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
    5855 }
    5856 
    5857 template<typename T>
    5858 static void vma_delete(VmaAllocator hAllocator, T* ptr)
    5859 {
    5860  if(ptr != VMA_NULL)
    5861  {
    5862  ptr->~T();
    5863  VmaFree(hAllocator, ptr);
    5864  }
    5865 }
    5866 
    5867 template<typename T>
    5868 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
    5869 {
    5870  if(ptr != VMA_NULL)
    5871  {
    5872  for(size_t i = count; i--; )
    5873  ptr[i].~T();
    5874  VmaFree(hAllocator, ptr);
    5875  }
    5876 }
    5877 
    5879 // VmaStringBuilder
    5880 
    5881 #if VMA_STATS_STRING_ENABLED
    5882 
// Accumulates characters into a growable buffer using the allocator's
// allocation callbacks. The buffer is not null-terminated; consume it via
// GetData() together with GetLength().
class VmaStringBuilder
{
public:
    VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    size_t GetLength() const { return m_Data.size(); }
    const char* GetData() const { return m_Data.data(); }

    void Add(char ch) { m_Data.push_back(ch); }
    void Add(const char* pStr);
    void AddNewLine() { Add('\n'); }
    void AddNumber(uint32_t num);
    void AddNumber(uint64_t num);
    void AddPointer(const void* ptr);

private:
    VmaVector< char, VmaStlAllocator<char> > m_Data;
};
    5900 
    5901 void VmaStringBuilder::Add(const char* pStr)
    5902 {
    5903  const size_t strLen = strlen(pStr);
    5904  if(strLen > 0)
    5905  {
    5906  const size_t oldCount = m_Data.size();
    5907  m_Data.resize(oldCount + strLen);
    5908  memcpy(m_Data.data() + oldCount, pStr, strLen);
    5909  }
    5910 }
    5911 
    5912 void VmaStringBuilder::AddNumber(uint32_t num)
    5913 {
    5914  char buf[11];
    5915  VmaUint32ToStr(buf, sizeof(buf), num);
    5916  Add(buf);
    5917 }
    5918 
    5919 void VmaStringBuilder::AddNumber(uint64_t num)
    5920 {
    5921  char buf[21];
    5922  VmaUint64ToStr(buf, sizeof(buf), num);
    5923  Add(buf);
    5924 }
    5925 
    5926 void VmaStringBuilder::AddPointer(const void* ptr)
    5927 {
    5928  char buf[21];
    5929  VmaPtrToStr(buf, sizeof(buf), ptr);
    5930  Add(buf);
    5931 }
    5932 
    5933 #endif // #if VMA_STATS_STRING_ENABLED
    5934 
    5936 // VmaJsonWriter
    5937 
    5938 #if VMA_STATS_STRING_ENABLED
    5939 
// Streaming JSON writer emitting into a VmaStringBuilder. Maintains a stack of
// open collections so it can insert commas, colons, and indentation correctly,
// and asserts on malformed usage (e.g. non-string object keys).
class VmaJsonWriter
{
    VMA_CLASS_NO_COPY(VmaJsonWriter)
public:
    VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    ~VmaJsonWriter();

    // Open/close a JSON object or array. singleLine suppresses newlines and
    // indentation inside the collection.
    void BeginObject(bool singleLine = false);
    void EndObject();

    void BeginArray(bool singleLine = false);
    void EndArray();

    // Writes a complete string value. Alternatively a string can be built
    // incrementally with BeginString / ContinueString* / EndString.
    void WriteString(const char* pStr);
    void BeginString(const char* pStr = VMA_NULL);
    void ContinueString(const char* pStr);
    void ContinueString(uint32_t n);
    void ContinueString(uint64_t n);
    void ContinueString_Pointer(const void* ptr);
    void EndString(const char* pStr = VMA_NULL);

    void WriteNumber(uint32_t n);
    void WriteNumber(uint64_t n);
    void WriteBool(bool b);
    void WriteNull();

private:
    // One indentation unit, repeated once per nesting level.
    static const char* const INDENT;

    enum COLLECTION_TYPE
    {
        COLLECTION_TYPE_OBJECT,
        COLLECTION_TYPE_ARRAY,
    };
    // One entry per currently-open collection.
    struct StackItem
    {
        COLLECTION_TYPE type;
        // Number of values written so far; inside an object, even counts are
        // keys and odd counts are values.
        uint32_t valueCount;
        bool singleLineMode;
    };

    VmaStringBuilder& m_SB;
    VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    // True between BeginString and EndString.
    bool m_InsideString;

    // Emits separators/indentation before a new value; isString tells it
    // whether the upcoming value is valid as an object key.
    void BeginValue(bool isString);
    void WriteIndent(bool oneLess = false);
};
    5988 
// Indentation unit appended once per nesting level by WriteIndent().
const char* const VmaJsonWriter::INDENT = " ";
    5990 
// Binds the writer to an output string builder. Writing starts at top level,
// outside of any string or collection.
VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
    m_SB(sb),
    m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    m_InsideString(false)
{
}
    5997 
// On destruction the document must be complete: no string in progress and
// every opened collection closed.
VmaJsonWriter::~VmaJsonWriter()
{
    VMA_ASSERT(!m_InsideString);
    VMA_ASSERT(m_Stack.empty());
}
    6003 
    6004 void VmaJsonWriter::BeginObject(bool singleLine)
    6005 {
    6006  VMA_ASSERT(!m_InsideString);
    6007 
    6008  BeginValue(false);
    6009  m_SB.Add('{');
    6010 
    6011  StackItem item;
    6012  item.type = COLLECTION_TYPE_OBJECT;
    6013  item.valueCount = 0;
    6014  item.singleLineMode = singleLine;
    6015  m_Stack.push_back(item);
    6016 }
    6017 
// Closes the innermost collection, which must be an object.
void VmaJsonWriter::EndObject()
{
    VMA_ASSERT(!m_InsideString);

    // Closing brace is indented one level less than the object's contents.
    WriteIndent(true);
    m_SB.Add('}');

    VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
    m_Stack.pop_back();
}
    6028 
    6029 void VmaJsonWriter::BeginArray(bool singleLine)
    6030 {
    6031  VMA_ASSERT(!m_InsideString);
    6032 
    6033  BeginValue(false);
    6034  m_SB.Add('[');
    6035 
    6036  StackItem item;
    6037  item.type = COLLECTION_TYPE_ARRAY;
    6038  item.valueCount = 0;
    6039  item.singleLineMode = singleLine;
    6040  m_Stack.push_back(item);
    6041 }
    6042 
// Closes the innermost collection, which must be an array.
void VmaJsonWriter::EndArray()
{
    VMA_ASSERT(!m_InsideString);

    // Closing bracket is indented one level less than the array's contents.
    WriteIndent(true);
    m_SB.Add(']');

    VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
    m_Stack.pop_back();
}
    6053 
    6054 void VmaJsonWriter::WriteString(const char* pStr)
    6055 {
    6056  BeginString(pStr);
    6057  EndString();
    6058 }
    6059 
    6060 void VmaJsonWriter::BeginString(const char* pStr)
    6061 {
    6062  VMA_ASSERT(!m_InsideString);
    6063 
    6064  BeginValue(true);
    6065  m_SB.Add('"');
    6066  m_InsideString = true;
    6067  if(pStr != VMA_NULL && pStr[0] != '\0')
    6068  {
    6069  ContinueString(pStr);
    6070  }
    6071 }
    6072 
    6073 void VmaJsonWriter::ContinueString(const char* pStr)
    6074 {
    6075  VMA_ASSERT(m_InsideString);
    6076 
    6077  const size_t strLen = strlen(pStr);
    6078  for(size_t i = 0; i < strLen; ++i)
    6079  {
    6080  char ch = pStr[i];
    6081  if(ch == '\\')
    6082  {
    6083  m_SB.Add("\\\\");
    6084  }
    6085  else if(ch == '"')
    6086  {
    6087  m_SB.Add("\\\"");
    6088  }
    6089  else if(ch >= 32)
    6090  {
    6091  m_SB.Add(ch);
    6092  }
    6093  else switch(ch)
    6094  {
    6095  case '\b':
    6096  m_SB.Add("\\b");
    6097  break;
    6098  case '\f':
    6099  m_SB.Add("\\f");
    6100  break;
    6101  case '\n':
    6102  m_SB.Add("\\n");
    6103  break;
    6104  case '\r':
    6105  m_SB.Add("\\r");
    6106  break;
    6107  case '\t':
    6108  m_SB.Add("\\t");
    6109  break;
    6110  default:
    6111  VMA_ASSERT(0 && "Character not currently supported.");
    6112  break;
    6113  }
    6114  }
    6115 }
    6116 
// Appends a decimal number to the string currently being written.
void VmaJsonWriter::ContinueString(uint32_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}
    6122 
// Appends a decimal number to the string currently being written.
void VmaJsonWriter::ContinueString(uint64_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}
    6128 
// Appends a pointer value rendering to the string currently being written.
void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddPointer(ptr);
}
    6134 
    6135 void VmaJsonWriter::EndString(const char* pStr)
    6136 {
    6137  VMA_ASSERT(m_InsideString);
    6138  if(pStr != VMA_NULL && pStr[0] != '\0')
    6139  {
    6140  ContinueString(pStr);
    6141  }
    6142  m_SB.Add('"');
    6143  m_InsideString = false;
    6144 }
    6145 
// Writes a standalone numeric JSON value.
void VmaJsonWriter::WriteNumber(uint32_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}
    6152 
// Writes a standalone numeric JSON value.
void VmaJsonWriter::WriteNumber(uint64_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}
    6159 
    6160 void VmaJsonWriter::WriteBool(bool b)
    6161 {
    6162  VMA_ASSERT(!m_InsideString);
    6163  BeginValue(false);
    6164  m_SB.Add(b ? "true" : "false");
    6165 }
    6166 
// Writes the JSON literal `null` as a standalone value.
void VmaJsonWriter::WriteNull()
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.Add("null");
}
    6173 
// Emits the separator/indentation that must precede a new value in the
// current collection, and advances the collection's value counter.
void VmaJsonWriter::BeginValue(bool isString)
{
    if(!m_Stack.empty())
    {
        StackItem& currItem = m_Stack.back();
        // Inside an object, even-numbered values are keys and must be strings.
        if(currItem.type == COLLECTION_TYPE_OBJECT &&
            currItem.valueCount % 2 == 0)
        {
            VMA_ASSERT(isString);
        }

        // Odd-numbered value inside an object is the value following a key:
        // separate it from the key with ": ".
        if(currItem.type == COLLECTION_TYPE_OBJECT &&
            currItem.valueCount % 2 != 0)
        {
            m_SB.Add(": ");
        }
        // Any later element (array element or object key): comma first.
        else if(currItem.valueCount > 0)
        {
            m_SB.Add(", ");
            WriteIndent();
        }
        // Very first element of the collection: indentation only.
        else
        {
            WriteIndent();
        }
        ++currItem.valueCount;
    }
}
    6202 
    6203 void VmaJsonWriter::WriteIndent(bool oneLess)
    6204 {
    6205  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
    6206  {
    6207  m_SB.AddNewLine();
    6208 
    6209  size_t count = m_Stack.size();
    6210  if(count > 0 && oneLess)
    6211  {
    6212  --count;
    6213  }
    6214  for(size_t i = 0; i < count; ++i)
    6215  {
    6216  m_SB.Add(INDENT);
    6217  }
    6218  }
    6219 }
    6220 
    6221 #endif // #if VMA_STATS_STRING_ENABLED
    6222 
    6224 
// Replaces the allocation's user data. In string mode (IsUserDataString())
// m_pUserData owns a heap copy of the text; otherwise the raw pointer is
// stored as-is and not owned.
void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
{
    if(IsUserDataString())
    {
        // Passing the currently-stored string back in would be freed below
        // before being copied - caller error.
        VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);

        FreeUserDataString(hAllocator);

        if(pUserData != VMA_NULL)
        {
            // Deep-copy the incoming null-terminated string (including the
            // terminator) using the allocator's own allocation callbacks.
            const char* const newStrSrc = (char*)pUserData;
            const size_t newStrLen = strlen(newStrSrc);
            char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
            memcpy(newStrDst, newStrSrc, newStrLen + 1);
            m_pUserData = newStrDst;
        }
    }
    else
    {
        m_pUserData = pUserData;
    }
}
    6247 
// Re-points a block-type allocation at a (possibly different) block/offset,
// e.g. after defragmentation moved it.
void VmaAllocation_T::ChangeBlockAllocation(
    VmaAllocator hAllocator,
    VmaDeviceMemoryBlock* block,
    VkDeviceSize offset)
{
    VMA_ASSERT(block != VMA_NULL);
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);

    // Move mapping reference counter from old block to new block.
    if(block != m_BlockAllocation.m_Block)
    {
        uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
        // A persistently mapped allocation holds one extra mapping reference.
        if(IsPersistentMap())
            ++mapRefCount;
        m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
        block->Map(hAllocator, mapRefCount, VMA_NULL);
    }

    m_BlockAllocation.m_Block = block;
    m_BlockAllocation.m_Offset = offset;
}
    6269 
    6270 VkDeviceSize VmaAllocation_T::GetOffset() const
    6271 {
    6272  switch(m_Type)
    6273  {
    6274  case ALLOCATION_TYPE_BLOCK:
    6275  return m_BlockAllocation.m_Offset;
    6276  case ALLOCATION_TYPE_DEDICATED:
    6277  return 0;
    6278  default:
    6279  VMA_ASSERT(0);
    6280  return 0;
    6281  }
    6282 }
    6283 
    6284 VkDeviceMemory VmaAllocation_T::GetMemory() const
    6285 {
    6286  switch(m_Type)
    6287  {
    6288  case ALLOCATION_TYPE_BLOCK:
    6289  return m_BlockAllocation.m_Block->GetDeviceMemory();
    6290  case ALLOCATION_TYPE_DEDICATED:
    6291  return m_DedicatedAllocation.m_hMemory;
    6292  default:
    6293  VMA_ASSERT(0);
    6294  return VK_NULL_HANDLE;
    6295  }
    6296 }
    6297 
    6298 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
    6299 {
    6300  switch(m_Type)
    6301  {
    6302  case ALLOCATION_TYPE_BLOCK:
    6303  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
    6304  case ALLOCATION_TYPE_DEDICATED:
    6305  return m_DedicatedAllocation.m_MemoryTypeIndex;
    6306  default:
    6307  VMA_ASSERT(0);
    6308  return UINT32_MAX;
    6309  }
    6310 }
    6311 
    6312 void* VmaAllocation_T::GetMappedData() const
    6313 {
    6314  switch(m_Type)
    6315  {
    6316  case ALLOCATION_TYPE_BLOCK:
    6317  if(m_MapCount != 0)
    6318  {
    6319  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
    6320  VMA_ASSERT(pBlockData != VMA_NULL);
    6321  return (char*)pBlockData + m_BlockAllocation.m_Offset;
    6322  }
    6323  else
    6324  {
    6325  return VMA_NULL;
    6326  }
    6327  break;
    6328  case ALLOCATION_TYPE_DEDICATED:
    6329  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
    6330  return m_DedicatedAllocation.m_pMappedData;
    6331  default:
    6332  VMA_ASSERT(0);
    6333  return VMA_NULL;
    6334  }
    6335 }
    6336 
    6337 bool VmaAllocation_T::CanBecomeLost() const
    6338 {
    6339  switch(m_Type)
    6340  {
    6341  case ALLOCATION_TYPE_BLOCK:
    6342  return m_BlockAllocation.m_CanBecomeLost;
    6343  case ALLOCATION_TYPE_DEDICATED:
    6344  return false;
    6345  default:
    6346  VMA_ASSERT(0);
    6347  return false;
    6348  }
    6349 }
    6350 
// Returns the pool handle stored with this block allocation.
// Only meaningful for ALLOCATION_TYPE_BLOCK (asserted); dedicated
// allocations do not carry a pool handle in this union member.
VmaPool VmaAllocation_T::GetPool() const
{
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    return m_BlockAllocation.m_hPool;
}
    6356 
// Tries to transition this allocation to the LOST state.
// Returns true on success; false if the allocation is already lost
// (asserted) or was used too recently (within frameInUseCount frames of
// currentFrameIndex) to be reclaimed. Thread-safe via a CAS retry loop on
// the atomic last-use frame index.
bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    VMA_ASSERT(CanBecomeLost());

    /*
    Warning: This is a carefully designed algorithm.
    Do not modify unless you really know what you're doing :)
    */
    uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
    // Classic compare-exchange loop: on CAS failure, localLastUseFrameIndex
    // is refreshed with the current value and the checks are re-evaluated.
    for(;;)
    {
        if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
        {
            // Caller must not ask to lose an already-lost allocation.
            VMA_ASSERT(0);
            return false;
        }
        else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
        {
            // Still potentially in use by the GPU - cannot be made lost.
            return false;
        }
        else // Last use time earlier than current time.
        {
            if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
            {
                // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
                // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
                return true;
            }
        }
    }
}
    6388 
    6389 #if VMA_STATS_STRING_ENABLED
    6390 
// Correspond to values of enum VmaSuballocationType.
// NOTE: Entries are indexed directly by the enum value (see PrintParameters
// and PrintDetailedMap_UnusedRange), so order and count must stay in sync
// with the enum.
static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    "FREE",
    "UNKNOWN",
    "BUFFER",
    "IMAGE_UNKNOWN",
    "IMAGE_LINEAR",
    "IMAGE_OPTIMAL",
};
    6400 
    6401 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
    6402 {
    6403  json.WriteString("Type");
    6404  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
    6405 
    6406  json.WriteString("Size");
    6407  json.WriteNumber(m_Size);
    6408 
    6409  if(m_pUserData != VMA_NULL)
    6410  {
    6411  json.WriteString("UserData");
    6412  if(IsUserDataString())
    6413  {
    6414  json.WriteString((const char*)m_pUserData);
    6415  }
    6416  else
    6417  {
    6418  json.BeginString();
    6419  json.ContinueString_Pointer(m_pUserData);
    6420  json.EndString();
    6421  }
    6422  }
    6423 
    6424  json.WriteString("CreationFrameIndex");
    6425  json.WriteNumber(m_CreationFrameIndex);
    6426 
    6427  json.WriteString("LastUseFrameIndex");
    6428  json.WriteNumber(GetLastUseFrameIndex());
    6429 
    6430  if(m_BufferImageUsage != 0)
    6431  {
    6432  json.WriteString("Usage");
    6433  json.WriteNumber(m_BufferImageUsage);
    6434  }
    6435 }
    6436 
    6437 #endif
    6438 
    6439 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
    6440 {
    6441  VMA_ASSERT(IsUserDataString());
    6442  if(m_pUserData != VMA_NULL)
    6443  {
    6444  char* const oldStr = (char*)m_pUserData;
    6445  const size_t oldStrLen = strlen(oldStr);
    6446  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
    6447  m_pUserData = VMA_NULL;
    6448  }
    6449 }
    6450 
    6451 void VmaAllocation_T::BlockAllocMap()
    6452 {
    6453  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    6454 
    6455  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    6456  {
    6457  ++m_MapCount;
    6458  }
    6459  else
    6460  {
    6461  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
    6462  }
    6463 }
    6464 
    6465 void VmaAllocation_T::BlockAllocUnmap()
    6466 {
    6467  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    6468 
    6469  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    6470  {
    6471  --m_MapCount;
    6472  }
    6473  else
    6474  {
    6475  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
    6476  }
    6477 }
    6478 
    6479 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
    6480 {
    6481  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
    6482 
    6483  if(m_MapCount != 0)
    6484  {
    6485  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    6486  {
    6487  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
    6488  *ppData = m_DedicatedAllocation.m_pMappedData;
    6489  ++m_MapCount;
    6490  return VK_SUCCESS;
    6491  }
    6492  else
    6493  {
    6494  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
    6495  return VK_ERROR_MEMORY_MAP_FAILED;
    6496  }
    6497  }
    6498  else
    6499  {
    6500  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
    6501  hAllocator->m_hDevice,
    6502  m_DedicatedAllocation.m_hMemory,
    6503  0, // offset
    6504  VK_WHOLE_SIZE,
    6505  0, // flags
    6506  ppData);
    6507  if(result == VK_SUCCESS)
    6508  {
    6509  m_DedicatedAllocation.m_pMappedData = *ppData;
    6510  m_MapCount = 1;
    6511  }
    6512  return result;
    6513  }
    6514 }
    6515 
    6516 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
    6517 {
    6518  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
    6519 
    6520  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    6521  {
    6522  --m_MapCount;
    6523  if(m_MapCount == 0)
    6524  {
    6525  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
    6526  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
    6527  hAllocator->m_hDevice,
    6528  m_DedicatedAllocation.m_hMemory);
    6529  }
    6530  }
    6531  else
    6532  {
    6533  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
    6534  }
    6535 }
    6536 
    6537 #if VMA_STATS_STRING_ENABLED
    6538 
    6539 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
    6540 {
    6541  json.BeginObject();
    6542 
    6543  json.WriteString("Blocks");
    6544  json.WriteNumber(stat.blockCount);
    6545 
    6546  json.WriteString("Allocations");
    6547  json.WriteNumber(stat.allocationCount);
    6548 
    6549  json.WriteString("UnusedRanges");
    6550  json.WriteNumber(stat.unusedRangeCount);
    6551 
    6552  json.WriteString("UsedBytes");
    6553  json.WriteNumber(stat.usedBytes);
    6554 
    6555  json.WriteString("UnusedBytes");
    6556  json.WriteNumber(stat.unusedBytes);
    6557 
    6558  if(stat.allocationCount > 1)
    6559  {
    6560  json.WriteString("AllocationSize");
    6561  json.BeginObject(true);
    6562  json.WriteString("Min");
    6563  json.WriteNumber(stat.allocationSizeMin);
    6564  json.WriteString("Avg");
    6565  json.WriteNumber(stat.allocationSizeAvg);
    6566  json.WriteString("Max");
    6567  json.WriteNumber(stat.allocationSizeMax);
    6568  json.EndObject();
    6569  }
    6570 
    6571  if(stat.unusedRangeCount > 1)
    6572  {
    6573  json.WriteString("UnusedRangeSize");
    6574  json.BeginObject(true);
    6575  json.WriteString("Min");
    6576  json.WriteNumber(stat.unusedRangeSizeMin);
    6577  json.WriteString("Avg");
    6578  json.WriteNumber(stat.unusedRangeSizeAvg);
    6579  json.WriteString("Max");
    6580  json.WriteNumber(stat.unusedRangeSizeMax);
    6581  json.EndObject();
    6582  }
    6583 
    6584  json.EndObject();
    6585 }
    6586 
    6587 #endif // #if VMA_STATS_STRING_ENABLED
    6588 
// Comparator ordering suballocation-list iterators by the size of the
// suballocation they point to. The second overload enables binary search
// of the by-size vector with a plain VkDeviceSize key (heterogeneous
// lookup, see VmaBinaryFindFirstNotLess usage).
struct VmaSuballocationItemSizeLess
{
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        const VmaSuballocationList::iterator rhs) const
    {
        return lhs->size < rhs->size;
    }
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        VkDeviceSize rhsSize) const
    {
        return lhs->size < rhsSize;
    }
};
    6604 
    6605 
    6607 // class VmaBlockMetadata
    6608 
// Base metadata constructor. Size starts at 0 and is set later by Init();
// allocation callbacks are captured from the allocator for use by derived
// classes' internal containers.
VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
    m_Size(0),
    m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
{
}
    6614 
    6615 #if VMA_STATS_STRING_ENABLED
    6616 
    6617 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
    6618  VkDeviceSize unusedBytes,
    6619  size_t allocationCount,
    6620  size_t unusedRangeCount) const
    6621 {
    6622  json.BeginObject();
    6623 
    6624  json.WriteString("TotalBytes");
    6625  json.WriteNumber(GetSize());
    6626 
    6627  json.WriteString("UnusedBytes");
    6628  json.WriteNumber(unusedBytes);
    6629 
    6630  json.WriteString("Allocations");
    6631  json.WriteNumber((uint64_t)allocationCount);
    6632 
    6633  json.WriteString("UnusedRanges");
    6634  json.WriteNumber((uint64_t)unusedRangeCount);
    6635 
    6636  json.WriteString("Suballocations");
    6637  json.BeginArray();
    6638 }
    6639 
    6640 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
    6641  VkDeviceSize offset,
    6642  VmaAllocation hAllocation) const
    6643 {
    6644  json.BeginObject(true);
    6645 
    6646  json.WriteString("Offset");
    6647  json.WriteNumber(offset);
    6648 
    6649  hAllocation->PrintParameters(json);
    6650 
    6651  json.EndObject();
    6652 }
    6653 
    6654 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
    6655  VkDeviceSize offset,
    6656  VkDeviceSize size) const
    6657 {
    6658  json.BeginObject(true);
    6659 
    6660  json.WriteString("Offset");
    6661  json.WriteNumber(offset);
    6662 
    6663  json.WriteString("Type");
    6664  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
    6665 
    6666  json.WriteString("Size");
    6667  json.WriteNumber(size);
    6668 
    6669  json.EndObject();
    6670 }
    6671 
// Closes the "Suballocations" array and the per-block object opened by
// PrintDetailedMap_Begin().
void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
{
    json.EndArray();
    json.EndObject();
}
    6677 
    6678 #endif // #if VMA_STATS_STRING_ENABLED
    6679 
    6681 // class VmaBlockMetadata_Generic
    6682 
// Generic metadata constructor. Counters start at zero; both internal
// containers route their memory through the allocator's custom allocation
// callbacks via VmaStlAllocator.
VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_FreeCount(0),
    m_SumFreeSize(0),
    m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
{
}
    6691 
// Containers release their own storage; nothing else to clean up here.
VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
{
}
    6695 
    6696 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
    6697 {
    6698  VmaBlockMetadata::Init(size);
    6699 
    6700  m_FreeCount = 1;
    6701  m_SumFreeSize = size;
    6702 
    6703  VmaSuballocation suballoc = {};
    6704  suballoc.offset = 0;
    6705  suballoc.size = size;
    6706  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    6707  suballoc.hAllocation = VK_NULL_HANDLE;
    6708 
    6709  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    6710  m_Suballocations.push_back(suballoc);
    6711  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
    6712  --suballocItem;
    6713  m_FreeSuballocationsBySize.push_back(suballocItem);
    6714 }
    6715 
// Full consistency check of this block's metadata. Recomputes offsets,
// free counts and sums by walking the suballocation list and compares
// them against the cached members, and verifies the by-size vector.
// Each VMA_VALIDATE returns false immediately on failure, so the checks
// below are order-dependent. Returns true when everything is consistent.
bool VmaBlockMetadata_Generic::Validate() const
{
    VMA_VALIDATE(!m_Suballocations.empty());

    // Expected offset of new suballocation as calculated from previous ones.
    VkDeviceSize calculatedOffset = 0;
    // Expected number of free suballocations as calculated from traversing their list.
    uint32_t calculatedFreeCount = 0;
    // Expected sum size of free suballocations as calculated from traversing their list.
    VkDeviceSize calculatedSumFreeSize = 0;
    // Expected number of free suballocations that should be registered in
    // m_FreeSuballocationsBySize calculated from traversing their list.
    size_t freeSuballocationsToRegister = 0;
    // True if previous visited suballocation was free.
    bool prevFree = false;

    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem)
    {
        const VmaSuballocation& subAlloc = *suballocItem;

        // Actual offset of this suballocation doesn't match expected one.
        VMA_VALIDATE(subAlloc.offset == calculatedOffset);

        const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
        // Two adjacent free suballocations are invalid. They should be merged.
        VMA_VALIDATE(!prevFree || !currFree);

        // Free suballocations carry no allocation handle; used ones must.
        VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));

        if(currFree)
        {
            calculatedSumFreeSize += subAlloc.size;
            ++calculatedFreeCount;
            if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
            {
                ++freeSuballocationsToRegister;
            }

            // Margin required between allocations - every free space must be at least that large.
            VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
        }
        else
        {
            // The allocation object must agree with the list entry.
            VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
            VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);

            // Margin required between allocations - previous allocation must be free.
            VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
        }

        calculatedOffset += subAlloc.size;
        prevFree = currFree;
    }

    // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    // match expected one.
    VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);

    // The by-size vector must reference only free entries, sorted ascending.
    VkDeviceSize lastSize = 0;
    for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    {
        VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];

        // Only free suballocations can be registered in m_FreeSuballocationsBySize.
        VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
        // They must be sorted by size ascending.
        VMA_VALIDATE(suballocItem->size >= lastSize);

        lastSize = suballocItem->size;
    }

    // Check if totals match calculacted values.
    VMA_VALIDATE(ValidateFreeSuballocationList());
    VMA_VALIDATE(calculatedOffset == GetSize());
    VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
    VMA_VALIDATE(calculatedFreeCount == m_FreeCount);

    return true;
}
    6797 
    6798 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
    6799 {
    6800  if(!m_FreeSuballocationsBySize.empty())
    6801  {
    6802  return m_FreeSuballocationsBySize.back()->size;
    6803  }
    6804  else
    6805  {
    6806  return 0;
    6807  }
    6808 }
    6809 
    6810 bool VmaBlockMetadata_Generic::IsEmpty() const
    6811 {
    6812  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
    6813 }
    6814 
    6815 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    6816 {
    6817  outInfo.blockCount = 1;
    6818 
    6819  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    6820  outInfo.allocationCount = rangeCount - m_FreeCount;
    6821  outInfo.unusedRangeCount = m_FreeCount;
    6822 
    6823  outInfo.unusedBytes = m_SumFreeSize;
    6824  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
    6825 
    6826  outInfo.allocationSizeMin = UINT64_MAX;
    6827  outInfo.allocationSizeMax = 0;
    6828  outInfo.unusedRangeSizeMin = UINT64_MAX;
    6829  outInfo.unusedRangeSizeMax = 0;
    6830 
    6831  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    6832  suballocItem != m_Suballocations.cend();
    6833  ++suballocItem)
    6834  {
    6835  const VmaSuballocation& suballoc = *suballocItem;
    6836  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    6837  {
    6838  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    6839  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
    6840  }
    6841  else
    6842  {
    6843  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
    6844  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
    6845  }
    6846  }
    6847 }
    6848 
    6849 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
    6850 {
    6851  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    6852 
    6853  inoutStats.size += GetSize();
    6854  inoutStats.unusedSize += m_SumFreeSize;
    6855  inoutStats.allocationCount += rangeCount - m_FreeCount;
    6856  inoutStats.unusedRangeCount += m_FreeCount;
    6857  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    6858 }
    6859 
    6860 #if VMA_STATS_STRING_ENABLED
    6861 
    6862 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
    6863 {
    6864  PrintDetailedMap_Begin(json,
    6865  m_SumFreeSize, // unusedBytes
    6866  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
    6867  m_FreeCount); // unusedRangeCount
    6868 
    6869  size_t i = 0;
    6870  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    6871  suballocItem != m_Suballocations.cend();
    6872  ++suballocItem, ++i)
    6873  {
    6874  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    6875  {
    6876  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
    6877  }
    6878  else
    6879  {
    6880  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
    6881  }
    6882  }
    6883 
    6884  PrintDetailedMap_End(json);
    6885 }
    6886 
    6887 #endif // #if VMA_STATS_STRING_ENABLED
    6888 
    6889 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
    6890  uint32_t currentFrameIndex,
    6891  uint32_t frameInUseCount,
    6892  VkDeviceSize bufferImageGranularity,
    6893  VkDeviceSize allocSize,
    6894  VkDeviceSize allocAlignment,
    6895  bool upperAddress,
    6896  VmaSuballocationType allocType,
    6897  bool canMakeOtherLost,
    6898  uint32_t strategy,
    6899  VmaAllocationRequest* pAllocationRequest)
    6900 {
    6901  VMA_ASSERT(allocSize > 0);
    6902  VMA_ASSERT(!upperAddress);
    6903  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    6904  VMA_ASSERT(pAllocationRequest != VMA_NULL);
    6905  VMA_HEAVY_ASSERT(Validate());
    6906 
    6907  // There is not enough total free space in this block to fullfill the request: Early return.
    6908  if(canMakeOtherLost == false &&
    6909  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
    6910  {
    6911  return false;
    6912  }
    6913 
    6914  // New algorithm, efficiently searching freeSuballocationsBySize.
    6915  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
    6916  if(freeSuballocCount > 0)
    6917  {
    6919  {
    6920  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
    6921  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    6922  m_FreeSuballocationsBySize.data(),
    6923  m_FreeSuballocationsBySize.data() + freeSuballocCount,
    6924  allocSize + 2 * VMA_DEBUG_MARGIN,
    6925  VmaSuballocationItemSizeLess());
    6926  size_t index = it - m_FreeSuballocationsBySize.data();
    6927  for(; index < freeSuballocCount; ++index)
    6928  {
    6929  if(CheckAllocation(
    6930  currentFrameIndex,
    6931  frameInUseCount,
    6932  bufferImageGranularity,
    6933  allocSize,
    6934  allocAlignment,
    6935  allocType,
    6936  m_FreeSuballocationsBySize[index],
    6937  false, // canMakeOtherLost
    6938  &pAllocationRequest->offset,
    6939  &pAllocationRequest->itemsToMakeLostCount,
    6940  &pAllocationRequest->sumFreeSize,
    6941  &pAllocationRequest->sumItemSize))
    6942  {
    6943  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    6944  return true;
    6945  }
    6946  }
    6947  }
    6948  else // WORST_FIT, FIRST_FIT
    6949  {
    6950  // Search staring from biggest suballocations.
    6951  for(size_t index = freeSuballocCount; index--; )
    6952  {
    6953  if(CheckAllocation(
    6954  currentFrameIndex,
    6955  frameInUseCount,
    6956  bufferImageGranularity,
    6957  allocSize,
    6958  allocAlignment,
    6959  allocType,
    6960  m_FreeSuballocationsBySize[index],
    6961  false, // canMakeOtherLost
    6962  &pAllocationRequest->offset,
    6963  &pAllocationRequest->itemsToMakeLostCount,
    6964  &pAllocationRequest->sumFreeSize,
    6965  &pAllocationRequest->sumItemSize))
    6966  {
    6967  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    6968  return true;
    6969  }
    6970  }
    6971  }
    6972  }
    6973 
    6974  if(canMakeOtherLost)
    6975  {
    6976  // Brute-force algorithm. TODO: Come up with something better.
    6977 
    6978  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
    6979  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
    6980 
    6981  VmaAllocationRequest tmpAllocRequest = {};
    6982  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
    6983  suballocIt != m_Suballocations.end();
    6984  ++suballocIt)
    6985  {
    6986  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
    6987  suballocIt->hAllocation->CanBecomeLost())
    6988  {
    6989  if(CheckAllocation(
    6990  currentFrameIndex,
    6991  frameInUseCount,
    6992  bufferImageGranularity,
    6993  allocSize,
    6994  allocAlignment,
    6995  allocType,
    6996  suballocIt,
    6997  canMakeOtherLost,
    6998  &tmpAllocRequest.offset,
    6999  &tmpAllocRequest.itemsToMakeLostCount,
    7000  &tmpAllocRequest.sumFreeSize,
    7001  &tmpAllocRequest.sumItemSize))
    7002  {
    7003  tmpAllocRequest.item = suballocIt;
    7004 
    7005  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost() ||
    7007  {
    7008  *pAllocationRequest = tmpAllocRequest;
    7009  }
    7010  }
    7011  }
    7012  }
    7013 
    7014  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
    7015  {
    7016  return true;
    7017  }
    7018  }
    7019 
    7020  return false;
    7021 }
    7022 
    7023 bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
    7024  uint32_t currentFrameIndex,
    7025  uint32_t frameInUseCount,
    7026  VmaAllocationRequest* pAllocationRequest)
    7027 {
    7028  while(pAllocationRequest->itemsToMakeLostCount > 0)
    7029  {
    7030  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
    7031  {
    7032  ++pAllocationRequest->item;
    7033  }
    7034  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    7035  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
    7036  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
    7037  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    7038  {
    7039  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
    7040  --pAllocationRequest->itemsToMakeLostCount;
    7041  }
    7042  else
    7043  {
    7044  return false;
    7045  }
    7046  }
    7047 
    7048  VMA_HEAVY_ASSERT(Validate());
    7049  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    7050  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
    7051 
    7052  return true;
    7053 }
    7054 
    7055 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    7056 {
    7057  uint32_t lostAllocationCount = 0;
    7058  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    7059  it != m_Suballocations.end();
    7060  ++it)
    7061  {
    7062  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
    7063  it->hAllocation->CanBecomeLost() &&
    7064  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    7065  {
    7066  it = FreeSuballocation(it);
    7067  ++lostAllocationCount;
    7068  }
    7069  }
    7070  return lostAllocationCount;
    7071 }
    7072 
    7073 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
    7074 {
    7075  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    7076  it != m_Suballocations.end();
    7077  ++it)
    7078  {
    7079  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
    7080  {
    7081  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
    7082  {
    7083  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    7084  return VK_ERROR_VALIDATION_FAILED_EXT;
    7085  }
    7086  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
    7087  {
    7088  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    7089  return VK_ERROR_VALIDATION_FAILED_EXT;
    7090  }
    7091  }
    7092  }
    7093 
    7094  return VK_SUCCESS;
    7095 }
    7096 
// Commits a previously computed allocation request: converts the chosen
// free suballocation into a used one of exactly allocSize at
// request.offset, and re-inserts any leftover space before/after it as new
// free suballocations. Order is critical: the item must be unregistered
// from the by-size vector BEFORE its size is modified, because that vector
// is sorted by (and searched on) the size field.
void VmaBlockMetadata_Generic::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    // This metadata type only allocates from the lower address side.
    VMA_ASSERT(!upperAddress);
    VMA_ASSERT(request.item != m_Suballocations.end());
    VmaSuballocation& suballoc = *request.item;
    // Given suballocation is a free block.
    VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    // Given offset is inside this suballocation.
    VMA_ASSERT(request.offset >= suballoc.offset);
    const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;

    // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    // it to become used.
    UnregisterFreeSuballocation(request.item);

    suballoc.offset = request.offset;
    suballoc.size = allocSize;
    suballoc.type = type;
    suballoc.hAllocation = hAllocation;

    // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    if(paddingEnd)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset + allocSize;
        paddingSuballoc.size = paddingEnd;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        VmaSuballocationList::iterator next = request.item;
        ++next;
        const VmaSuballocationList::iterator paddingEndItem =
            m_Suballocations.insert(next, paddingSuballoc);
        RegisterFreeSuballocation(paddingEndItem);
    }

    // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    if(paddingBegin)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset - paddingBegin;
        paddingSuballoc.size = paddingBegin;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        const VmaSuballocationList::iterator paddingBeginItem =
            m_Suballocations.insert(request.item, paddingSuballoc);
        RegisterFreeSuballocation(paddingBeginItem);
    }

    // Update totals.
    // One free range was consumed; each non-empty padding re-adds one.
    m_FreeCount = m_FreeCount - 1;
    if(paddingBegin > 0)
    {
        ++m_FreeCount;
    }
    if(paddingEnd > 0)
    {
        ++m_FreeCount;
    }
    m_SumFreeSize -= allocSize;
}
    7162 
    7163 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
    7164 {
    7165  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    7166  suballocItem != m_Suballocations.end();
    7167  ++suballocItem)
    7168  {
    7169  VmaSuballocation& suballoc = *suballocItem;
    7170  if(suballoc.hAllocation == allocation)
    7171  {
    7172  FreeSuballocation(suballocItem);
    7173  VMA_HEAVY_ASSERT(Validate());
    7174  return;
    7175  }
    7176  }
    7177  VMA_ASSERT(0 && "Not found!");
    7178 }
    7179 
    7180 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
    7181 {
    7182  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    7183  suballocItem != m_Suballocations.end();
    7184  ++suballocItem)
    7185  {
    7186  VmaSuballocation& suballoc = *suballocItem;
    7187  if(suballoc.offset == offset)
    7188  {
    7189  FreeSuballocation(suballocItem);
    7190  return;
    7191  }
    7192  }
    7193  VMA_ASSERT(0 && "Not found!");
    7194 }
    7195 
    7196 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
    7197 {
    7198  VkDeviceSize lastSize = 0;
    7199  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
    7200  {
    7201  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
    7202 
    7203  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
    7204  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    7205  VMA_VALIDATE(it->size >= lastSize);
    7206  lastSize = it->size;
    7207  }
    7208  return true;
    7209 }
    7210 
/*
Checks whether an allocation of allocSize / allocAlignment / allocType can be
placed starting at suballocItem, and if so, computes its final offset.

- If canMakeOtherLost is false, suballocItem must be a single FREE suballocation
  large enough to hold the request (plus debug margins and granularity padding).
- If canMakeOtherLost is true, the request may span multiple consecutive
  suballocations; occupied ones are acceptable only if their allocations can
  become lost (CanBecomeLost() and last use older than frameInUseCount frames).

Outputs (valid only when the function returns true):
- *pOffset: final offset, aligned and adjusted for VMA_DEBUG_MARGIN and
  bufferImageGranularity.
- *itemsToMakeLostCount: number of existing allocations that would have to be
  made lost to satisfy the request.
- *pSumFreeSize / *pSumItemSize: bytes of free space vs. bytes of to-be-lost
  allocations the request would consume.

Returns false as soon as any constraint rules this starting point out.
*/
bool VmaBlockMetadata_Generic::CheckAllocation(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    VmaSuballocationList::const_iterator suballocItem,
    bool canMakeOtherLost,
    VkDeviceSize* pOffset,
    size_t* itemsToMakeLostCount,
    VkDeviceSize* pSumFreeSize,
    VkDeviceSize* pSumItemSize) const
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(suballocItem != m_Suballocations.cend());
    VMA_ASSERT(pOffset != VMA_NULL);

    *itemsToMakeLostCount = 0;
    *pSumFreeSize = 0;
    *pSumItemSize = 0;

    if(canMakeOtherLost)
    {
        // The starting suballocation itself may be free or may be an
        // allocation that can be made lost.
        if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            *pSumFreeSize = suballocItem->size;
        }
        else
        {
            if(suballocItem->hAllocation->CanBecomeLost() &&
                suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
            {
                ++*itemsToMakeLostCount;
                *pSumItemSize = suballocItem->size;
            }
            else
            {
                return false;
            }
        }

        // Remaining size is too small for this request: Early return.
        if(GetSize() - suballocItem->offset < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballocItem->offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                // Pushing the offset to the next granularity page resolves the
                // conflict with all preceding suballocations.
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Now that we have final *pOffset, check if we are past suballocItem.
        // If yes, return false - this function should be called for another suballocItem as starting point.
        if(*pOffset >= suballocItem->offset + suballocItem->size)
        {
            return false;
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
        // Another early return check.
        if(suballocItem->offset + totalSize > GetSize())
        {
            return false;
        }

        // Advance lastSuballocItem until desired size is reached.
        // Update itemsToMakeLostCount.
        VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
        if(totalSize > suballocItem->size)
        {
            VkDeviceSize remainingSize = totalSize - suballocItem->size;
            while(remainingSize > 0)
            {
                ++lastSuballocItem;
                if(lastSuballocItem == m_Suballocations.cend())
                {
                    return false;
                }
                if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                {
                    *pSumFreeSize += lastSuballocItem->size;
                }
                else
                {
                    // Occupied: only acceptable if it can be made lost.
                    VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
                    if(lastSuballocItem->hAllocation->CanBecomeLost() &&
                        lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                    {
                        ++*itemsToMakeLostCount;
                        *pSumItemSize += lastSuballocItem->size;
                    }
                    else
                    {
                        return false;
                    }
                }
                remainingSize = (lastSuballocItem->size < remainingSize) ?
                    remainingSize - lastSuballocItem->size : 0;
            }
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, we must mark more allocations lost or fail.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
                        if(nextSuballoc.hAllocation->CanBecomeLost() &&
                            nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++*itemsToMakeLostCount;
                        }
                        else
                        {
                            return false;
                        }
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }
    else
    {
        // Simple case: the whole request must fit inside this one free
        // suballocation.
        const VmaSuballocation& suballoc = *suballocItem;
        VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        *pSumFreeSize = suballoc.size;

        // Size of this suballocation is too small for this request: Early return.
        if(suballoc.size < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballoc.offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        // Fail if requested size plus margin before and after is bigger than size of this suballocation.
        if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
        {
            return false;
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, allocation cannot be made here.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        return false;
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }

    // All tests passed: Success. pOffset is already filled.
    return true;
}
    7484 
    7485 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
    7486 {
    7487  VMA_ASSERT(item != m_Suballocations.end());
    7488  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    7489 
    7490  VmaSuballocationList::iterator nextItem = item;
    7491  ++nextItem;
    7492  VMA_ASSERT(nextItem != m_Suballocations.end());
    7493  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
    7494 
    7495  item->size += nextItem->size;
    7496  --m_FreeCount;
    7497  m_Suballocations.erase(nextItem);
    7498 }
    7499 
/*
Marks the given suballocation as free, updates block totals, and coalesces it
with free neighbors on either side. Returns an iterator to the resulting
(possibly merged) free suballocation.

Ordering here matters: neighbors are unregistered from the size-sorted vector
BEFORE merging (their iterators/sizes change during the merge), and the final
merged item is registered exactly once at the end.
*/
VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
{
    // Change this suballocation to be marked as free.
    VmaSuballocation& suballoc = *suballocItem;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    suballoc.hAllocation = VK_NULL_HANDLE;

    // Update totals.
    ++m_FreeCount;
    m_SumFreeSize += suballoc.size;

    // Merge with previous and/or next suballocation if it's also free.
    bool mergeWithNext = false;
    bool mergeWithPrev = false;

    VmaSuballocationList::iterator nextItem = suballocItem;
    ++nextItem;
    if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    {
        mergeWithNext = true;
    }

    VmaSuballocationList::iterator prevItem = suballocItem;
    if(suballocItem != m_Suballocations.begin())
    {
        --prevItem;
        if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            mergeWithPrev = true;
        }
    }

    if(mergeWithNext)
    {
        // nextItem is erased by the merge; take it out of the size index first.
        UnregisterFreeSuballocation(nextItem);
        MergeFreeWithNext(suballocItem);
    }

    if(mergeWithPrev)
    {
        // prevItem grows during the merge, so it must be re-registered with
        // its new size; suballocItem's node is erased by this merge.
        UnregisterFreeSuballocation(prevItem);
        MergeFreeWithNext(prevItem);
        RegisterFreeSuballocation(prevItem);
        return prevItem;
    }
    else
    {
        RegisterFreeSuballocation(suballocItem);
        return suballocItem;
    }
}
    7551 
    7552 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
    7553 {
    7554  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    7555  VMA_ASSERT(item->size > 0);
    7556 
    7557  // You may want to enable this validation at the beginning or at the end of
    7558  // this function, depending on what do you want to check.
    7559  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    7560 
    7561  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    7562  {
    7563  if(m_FreeSuballocationsBySize.empty())
    7564  {
    7565  m_FreeSuballocationsBySize.push_back(item);
    7566  }
    7567  else
    7568  {
    7569  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
    7570  }
    7571  }
    7572 
    7573  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    7574 }
    7575 
    7576 
/*
Removes a free suballocation from m_FreeSuballocationsBySize.

Because the vector is sorted by size only (not by identity), the lookup is a
binary search to the first entry of equal size, followed by a linear probe
across the run of equal-sized entries until the exact iterator is found.
Items below VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER were never registered,
so for them this is a no-op.
*/
void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
{
    VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(item->size > 0);

    // You may want to enable this validation at the beginning or at the end of
    // this function, depending on what do you want to check.
    VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());

    if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    {
        // Binary search to the first element whose size is not less than item's.
        VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
            m_FreeSuballocationsBySize.data(),
            m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
            item,
            VmaSuballocationItemSizeLess());
        // Probe forward through the equal-size run for the exact iterator.
        for(size_t index = it - m_FreeSuballocationsBySize.data();
            index < m_FreeSuballocationsBySize.size();
            ++index)
        {
            if(m_FreeSuballocationsBySize[index] == item)
            {
                VmaVectorRemove(m_FreeSuballocationsBySize, index);
                return;
            }
            // Leaving the equal-size run without a match means the item was
            // never registered — that is a caller bug.
            VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
        }
        VMA_ASSERT(0 && "Not found.");
    }

    //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
}
    7609 
////////////////////////////////////////////////////////////////////////////////
// class VmaBlockMetadata_Linear
    7612 
// Constructs empty linear metadata. The two suballocation vectors (0 and 1)
// are role-swapped at runtime via m_1stVectorIndex; both use the allocator's
// host allocation callbacks. The 2nd vector starts unused
// (SECOND_VECTOR_EMPTY) and all null-item counters start at zero.
VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_SumFreeSize(0),
    m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_1stVectorIndex(0),
    m_2ndVectorMode(SECOND_VECTOR_EMPTY),
    m_1stNullItemsBeginCount(0),
    m_1stNullItemsMiddleCount(0),
    m_2ndNullItemsCount(0)
{
}
    7625 
// Nothing to release explicitly: the suballocation vectors clean up through
// their own destructors.
VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
{
}
    7629 
// Initializes metadata for a block of the given size. Initially the whole
// block is one free region.
void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);
    m_SumFreeSize = size;
}
    7635 
/*
Debug validation of the linear metadata invariants:
- the 2nd vector is non-empty exactly when a 2nd-vector mode is active;
- null (freed) items appear only where the counters say they do;
- offsets are strictly increasing with at least VMA_DEBUG_MARGIN between
  consecutive suballocations, in the order ring-buffer 2nd vector, then 1st
  vector, then double-stack 2nd vector (iterated backwards, i.e. by
  ascending offset);
- every live suballocation agrees with its VmaAllocation's offset/size;
- m_SumFreeSize equals block size minus the sum of live allocation sizes.
Returns true on success; VMA_VALIDATE reports and returns false otherwise.
*/
bool VmaBlockMetadata_Linear::Validate() const
{
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
    VMA_VALIDATE(!suballocations1st.empty() ||
        suballocations2nd.empty() ||
        m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);

    if(!suballocations1st.empty())
    {
        // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
        VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
    }
    if(!suballocations2nd.empty())
    {
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
    }

    VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
    VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());

    VkDeviceSize sumUsedSize = 0;
    const size_t suballoc1stCount = suballocations1st.size();
    VkDeviceSize offset = VMA_DEBUG_MARGIN;

    // In ring-buffer mode the 2nd vector occupies the lowest offsets, so it
    // is checked first to keep `offset` monotonically increasing.
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = 0; i < suballoc2ndCount; ++i)
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    // Leading run of the 1st vector must consist entirely of null items.
    for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
            suballoc.hAllocation == VK_NULL_HANDLE);
    }

    size_t nullItem1stCount = m_1stNullItemsBeginCount;

    for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
        VMA_VALIDATE(suballoc.offset >= offset);
        // NOTE(review): i >= m_1stNullItemsBeginCount is always true here
        // given the loop bounds, so this check is effectively a no-op.
        VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);

        if(!currFree)
        {
            VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
            VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
            sumUsedSize += suballoc.size;
        }
        else
        {
            ++nullItem1stCount;
        }

        offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    }
    VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);

    // In double-stack mode the 2nd vector grows down from the end of the
    // block; iterating it backwards visits entries in ascending offset order.
    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = suballoc2ndCount; i--; )
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    VMA_VALIDATE(offset <= GetSize());
    VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);

    return true;
}
    7762 
    7763 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
    7764 {
    7765  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
    7766  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
    7767 }
    7768 
    7769 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
    7770 {
    7771  const VkDeviceSize size = GetSize();
    7772 
    7773  /*
    7774  We don't consider gaps inside allocation vectors with freed allocations because
    7775  they are not suitable for reuse in linear allocator. We consider only space that
    7776  is available for new allocations.
    7777  */
    7778  if(IsEmpty())
    7779  {
    7780  return size;
    7781  }
    7782 
    7783  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    7784 
    7785  switch(m_2ndVectorMode)
    7786  {
    7787  case SECOND_VECTOR_EMPTY:
    7788  /*
    7789  Available space is after end of 1st, as well as before beginning of 1st (which
    7790  whould make it a ring buffer).
    7791  */
    7792  {
    7793  const size_t suballocations1stCount = suballocations1st.size();
    7794  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
    7795  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
    7796  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
    7797  return VMA_MAX(
    7798  firstSuballoc.offset,
    7799  size - (lastSuballoc.offset + lastSuballoc.size));
    7800  }
    7801  break;
    7802 
    7803  case SECOND_VECTOR_RING_BUFFER:
    7804  /*
    7805  Available space is only between end of 2nd and beginning of 1st.
    7806  */
    7807  {
    7808  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    7809  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
    7810  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
    7811  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
    7812  }
    7813  break;
    7814 
    7815  case SECOND_VECTOR_DOUBLE_STACK:
    7816  /*
    7817  Available space is only between end of 1st and top of 2nd.
    7818  */
    7819  {
    7820  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    7821  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
    7822  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
    7823  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
    7824  }
    7825  break;
    7826 
    7827  default:
    7828  VMA_ASSERT(0);
    7829  return 0;
    7830  }
    7831 }
    7832 
    7833 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    7834 {
    7835  const VkDeviceSize size = GetSize();
    7836  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    7837  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    7838  const size_t suballoc1stCount = suballocations1st.size();
    7839  const size_t suballoc2ndCount = suballocations2nd.size();
    7840 
    7841  outInfo.blockCount = 1;
    7842  outInfo.allocationCount = (uint32_t)GetAllocationCount();
    7843  outInfo.unusedRangeCount = 0;
    7844  outInfo.usedBytes = 0;
    7845  outInfo.allocationSizeMin = UINT64_MAX;
    7846  outInfo.allocationSizeMax = 0;
    7847  outInfo.unusedRangeSizeMin = UINT64_MAX;
    7848  outInfo.unusedRangeSizeMax = 0;
    7849 
    7850  VkDeviceSize lastOffset = 0;
    7851 
    7852  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    7853  {
    7854  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    7855  size_t nextAlloc2ndIndex = 0;
    7856  while(lastOffset < freeSpace2ndTo1stEnd)
    7857  {
    7858  // Find next non-null allocation or move nextAllocIndex to the end.
    7859  while(nextAlloc2ndIndex < suballoc2ndCount &&
    7860  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    7861  {
    7862  ++nextAlloc2ndIndex;
    7863  }
    7864 
    7865  // Found non-null allocation.
    7866  if(nextAlloc2ndIndex < suballoc2ndCount)
    7867  {
    7868  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    7869 
    7870  // 1. Process free space before this allocation.
    7871  if(lastOffset < suballoc.offset)
    7872  {
    7873  // There is free space from lastOffset to suballoc.offset.
    7874  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    7875  ++outInfo.unusedRangeCount;
    7876  outInfo.unusedBytes += unusedRangeSize;
    7877  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    7878  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    7879  }
    7880 
    7881  // 2. Process this allocation.
    7882  // There is allocation with suballoc.offset, suballoc.size.
    7883  outInfo.usedBytes += suballoc.size;
    7884  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    7885  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    7886 
    7887  // 3. Prepare for next iteration.
    7888  lastOffset = suballoc.offset + suballoc.size;
    7889  ++nextAlloc2ndIndex;
    7890  }
    7891  // We are at the end.
    7892  else
    7893  {
    7894  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    7895  if(lastOffset < freeSpace2ndTo1stEnd)
    7896  {
    7897  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    7898  ++outInfo.unusedRangeCount;
    7899  outInfo.unusedBytes += unusedRangeSize;
    7900  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    7901  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    7902  }
    7903 
    7904  // End of loop.
    7905  lastOffset = freeSpace2ndTo1stEnd;
    7906  }
    7907  }
    7908  }
    7909 
    7910  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    7911  const VkDeviceSize freeSpace1stTo2ndEnd =
    7912  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    7913  while(lastOffset < freeSpace1stTo2ndEnd)
    7914  {
    7915  // Find next non-null allocation or move nextAllocIndex to the end.
    7916  while(nextAlloc1stIndex < suballoc1stCount &&
    7917  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    7918  {
    7919  ++nextAlloc1stIndex;
    7920  }
    7921 
    7922  // Found non-null allocation.
    7923  if(nextAlloc1stIndex < suballoc1stCount)
    7924  {
    7925  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    7926 
    7927  // 1. Process free space before this allocation.
    7928  if(lastOffset < suballoc.offset)
    7929  {
    7930  // There is free space from lastOffset to suballoc.offset.
    7931  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    7932  ++outInfo.unusedRangeCount;
    7933  outInfo.unusedBytes += unusedRangeSize;
    7934  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    7935  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    7936  }
    7937 
    7938  // 2. Process this allocation.
    7939  // There is allocation with suballoc.offset, suballoc.size.
    7940  outInfo.usedBytes += suballoc.size;
    7941  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    7942  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    7943 
    7944  // 3. Prepare for next iteration.
    7945  lastOffset = suballoc.offset + suballoc.size;
    7946  ++nextAlloc1stIndex;
    7947  }
    7948  // We are at the end.
    7949  else
    7950  {
    7951  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    7952  if(lastOffset < freeSpace1stTo2ndEnd)
    7953  {
    7954  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    7955  ++outInfo.unusedRangeCount;
    7956  outInfo.unusedBytes += unusedRangeSize;
    7957  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    7958  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    7959  }
    7960 
    7961  // End of loop.
    7962  lastOffset = freeSpace1stTo2ndEnd;
    7963  }
    7964  }
    7965 
    7966  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    7967  {
    7968  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    7969  while(lastOffset < size)
    7970  {
    7971  // Find next non-null allocation or move nextAllocIndex to the end.
    7972  while(nextAlloc2ndIndex != SIZE_MAX &&
    7973  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    7974  {
    7975  --nextAlloc2ndIndex;
    7976  }
    7977 
    7978  // Found non-null allocation.
    7979  if(nextAlloc2ndIndex != SIZE_MAX)
    7980  {
    7981  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    7982 
    7983  // 1. Process free space before this allocation.
    7984  if(lastOffset < suballoc.offset)
    7985  {
    7986  // There is free space from lastOffset to suballoc.offset.
    7987  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    7988  ++outInfo.unusedRangeCount;
    7989  outInfo.unusedBytes += unusedRangeSize;
    7990  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    7991  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    7992  }
    7993 
    7994  // 2. Process this allocation.
    7995  // There is allocation with suballoc.offset, suballoc.size.
    7996  outInfo.usedBytes += suballoc.size;
    7997  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    7998  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    7999 
    8000  // 3. Prepare for next iteration.
    8001  lastOffset = suballoc.offset + suballoc.size;
    8002  --nextAlloc2ndIndex;
    8003  }
    8004  // We are at the end.
    8005  else
    8006  {
    8007  // There is free space from lastOffset to size.
    8008  if(lastOffset < size)
    8009  {
    8010  const VkDeviceSize unusedRangeSize = size - lastOffset;
    8011  ++outInfo.unusedRangeCount;
    8012  outInfo.unusedBytes += unusedRangeSize;
    8013  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8014  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8015  }
    8016 
    8017  // End of loop.
    8018  lastOffset = size;
    8019  }
    8020  }
    8021  }
    8022 
    8023  outInfo.unusedBytes = size - outInfo.usedBytes;
    8024 }
    8025 
    8026 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
    8027 {
    8028  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8029  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8030  const VkDeviceSize size = GetSize();
    8031  const size_t suballoc1stCount = suballocations1st.size();
    8032  const size_t suballoc2ndCount = suballocations2nd.size();
    8033 
    8034  inoutStats.size += size;
    8035 
    8036  VkDeviceSize lastOffset = 0;
    8037 
    8038  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8039  {
    8040  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8041  size_t nextAlloc2ndIndex = m_1stNullItemsBeginCount;
    8042  while(lastOffset < freeSpace2ndTo1stEnd)
    8043  {
    8044  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8045  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8046  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8047  {
    8048  ++nextAlloc2ndIndex;
    8049  }
    8050 
    8051  // Found non-null allocation.
    8052  if(nextAlloc2ndIndex < suballoc2ndCount)
    8053  {
    8054  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8055 
    8056  // 1. Process free space before this allocation.
    8057  if(lastOffset < suballoc.offset)
    8058  {
    8059  // There is free space from lastOffset to suballoc.offset.
    8060  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8061  inoutStats.unusedSize += unusedRangeSize;
    8062  ++inoutStats.unusedRangeCount;
    8063  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8064  }
    8065 
    8066  // 2. Process this allocation.
    8067  // There is allocation with suballoc.offset, suballoc.size.
    8068  ++inoutStats.allocationCount;
    8069 
    8070  // 3. Prepare for next iteration.
    8071  lastOffset = suballoc.offset + suballoc.size;
    8072  ++nextAlloc2ndIndex;
    8073  }
    8074  // We are at the end.
    8075  else
    8076  {
    8077  if(lastOffset < freeSpace2ndTo1stEnd)
    8078  {
    8079  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8080  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    8081  inoutStats.unusedSize += unusedRangeSize;
    8082  ++inoutStats.unusedRangeCount;
    8083  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8084  }
    8085 
    8086  // End of loop.
    8087  lastOffset = freeSpace2ndTo1stEnd;
    8088  }
    8089  }
    8090  }
    8091 
    8092  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8093  const VkDeviceSize freeSpace1stTo2ndEnd =
    8094  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    8095  while(lastOffset < freeSpace1stTo2ndEnd)
    8096  {
    8097  // Find next non-null allocation or move nextAllocIndex to the end.
    8098  while(nextAlloc1stIndex < suballoc1stCount &&
    8099  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8100  {
    8101  ++nextAlloc1stIndex;
    8102  }
    8103 
    8104  // Found non-null allocation.
    8105  if(nextAlloc1stIndex < suballoc1stCount)
    8106  {
    8107  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8108 
    8109  // 1. Process free space before this allocation.
    8110  if(lastOffset < suballoc.offset)
    8111  {
    8112  // There is free space from lastOffset to suballoc.offset.
    8113  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8114  inoutStats.unusedSize += unusedRangeSize;
    8115  ++inoutStats.unusedRangeCount;
    8116  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8117  }
    8118 
    8119  // 2. Process this allocation.
    8120  // There is allocation with suballoc.offset, suballoc.size.
    8121  ++inoutStats.allocationCount;
    8122 
    8123  // 3. Prepare for next iteration.
    8124  lastOffset = suballoc.offset + suballoc.size;
    8125  ++nextAlloc1stIndex;
    8126  }
    8127  // We are at the end.
    8128  else
    8129  {
    8130  if(lastOffset < freeSpace1stTo2ndEnd)
    8131  {
    8132  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8133  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    8134  inoutStats.unusedSize += unusedRangeSize;
    8135  ++inoutStats.unusedRangeCount;
    8136  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8137  }
    8138 
    8139  // End of loop.
    8140  lastOffset = freeSpace1stTo2ndEnd;
    8141  }
    8142  }
    8143 
    8144  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8145  {
    8146  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8147  while(lastOffset < size)
    8148  {
    8149  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8150  while(nextAlloc2ndIndex != SIZE_MAX &&
    8151  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8152  {
    8153  --nextAlloc2ndIndex;
    8154  }
    8155 
    8156  // Found non-null allocation.
    8157  if(nextAlloc2ndIndex != SIZE_MAX)
    8158  {
    8159  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8160 
    8161  // 1. Process free space before this allocation.
    8162  if(lastOffset < suballoc.offset)
    8163  {
    8164  // There is free space from lastOffset to suballoc.offset.
    8165  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8166  inoutStats.unusedSize += unusedRangeSize;
    8167  ++inoutStats.unusedRangeCount;
    8168  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8169  }
    8170 
    8171  // 2. Process this allocation.
    8172  // There is allocation with suballoc.offset, suballoc.size.
    8173  ++inoutStats.allocationCount;
    8174 
    8175  // 3. Prepare for next iteration.
    8176  lastOffset = suballoc.offset + suballoc.size;
    8177  --nextAlloc2ndIndex;
    8178  }
    8179  // We are at the end.
    8180  else
    8181  {
    8182  if(lastOffset < size)
    8183  {
    8184  // There is free space from lastOffset to size.
    8185  const VkDeviceSize unusedRangeSize = size - lastOffset;
    8186  inoutStats.unusedSize += unusedRangeSize;
    8187  ++inoutStats.unusedRangeCount;
    8188  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8189  }
    8190 
    8191  // End of loop.
    8192  lastOffset = size;
    8193  }
    8194  }
    8195  }
    8196 }
    8197 
    8198 #if VMA_STATS_STRING_ENABLED
    8199 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
    8200 {
    8201  const VkDeviceSize size = GetSize();
    8202  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8203  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8204  const size_t suballoc1stCount = suballocations1st.size();
    8205  const size_t suballoc2ndCount = suballocations2nd.size();
    8206 
    8207  // FIRST PASS
    8208 
    8209  size_t unusedRangeCount = 0;
    8210  VkDeviceSize usedBytes = 0;
    8211 
    8212  VkDeviceSize lastOffset = 0;
    8213 
    8214  size_t alloc2ndCount = 0;
    8215  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8216  {
    8217  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8218  size_t nextAlloc2ndIndex = 0;
    8219  while(lastOffset < freeSpace2ndTo1stEnd)
    8220  {
    8221  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8222  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8223  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8224  {
    8225  ++nextAlloc2ndIndex;
    8226  }
    8227 
    8228  // Found non-null allocation.
    8229  if(nextAlloc2ndIndex < suballoc2ndCount)
    8230  {
    8231  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8232 
    8233  // 1. Process free space before this allocation.
    8234  if(lastOffset < suballoc.offset)
    8235  {
    8236  // There is free space from lastOffset to suballoc.offset.
    8237  ++unusedRangeCount;
    8238  }
    8239 
    8240  // 2. Process this allocation.
    8241  // There is allocation with suballoc.offset, suballoc.size.
    8242  ++alloc2ndCount;
    8243  usedBytes += suballoc.size;
    8244 
    8245  // 3. Prepare for next iteration.
    8246  lastOffset = suballoc.offset + suballoc.size;
    8247  ++nextAlloc2ndIndex;
    8248  }
    8249  // We are at the end.
    8250  else
    8251  {
    8252  if(lastOffset < freeSpace2ndTo1stEnd)
    8253  {
    8254  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8255  ++unusedRangeCount;
    8256  }
    8257 
    8258  // End of loop.
    8259  lastOffset = freeSpace2ndTo1stEnd;
    8260  }
    8261  }
    8262  }
    8263 
    8264  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8265  size_t alloc1stCount = 0;
    8266  const VkDeviceSize freeSpace1stTo2ndEnd =
    8267  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    8268  while(lastOffset < freeSpace1stTo2ndEnd)
    8269  {
    8270  // Find next non-null allocation or move nextAllocIndex to the end.
    8271  while(nextAlloc1stIndex < suballoc1stCount &&
    8272  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8273  {
    8274  ++nextAlloc1stIndex;
    8275  }
    8276 
    8277  // Found non-null allocation.
    8278  if(nextAlloc1stIndex < suballoc1stCount)
    8279  {
    8280  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8281 
    8282  // 1. Process free space before this allocation.
    8283  if(lastOffset < suballoc.offset)
    8284  {
    8285  // There is free space from lastOffset to suballoc.offset.
    8286  ++unusedRangeCount;
    8287  }
    8288 
    8289  // 2. Process this allocation.
    8290  // There is allocation with suballoc.offset, suballoc.size.
    8291  ++alloc1stCount;
    8292  usedBytes += suballoc.size;
    8293 
    8294  // 3. Prepare for next iteration.
    8295  lastOffset = suballoc.offset + suballoc.size;
    8296  ++nextAlloc1stIndex;
    8297  }
    8298  // We are at the end.
    8299  else
    8300  {
    8301  if(lastOffset < size)
    8302  {
    8303  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8304  ++unusedRangeCount;
    8305  }
    8306 
    8307  // End of loop.
    8308  lastOffset = freeSpace1stTo2ndEnd;
    8309  }
    8310  }
    8311 
    8312  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8313  {
    8314  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8315  while(lastOffset < size)
    8316  {
    8317  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8318  while(nextAlloc2ndIndex != SIZE_MAX &&
    8319  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8320  {
    8321  --nextAlloc2ndIndex;
    8322  }
    8323 
    8324  // Found non-null allocation.
    8325  if(nextAlloc2ndIndex != SIZE_MAX)
    8326  {
    8327  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8328 
    8329  // 1. Process free space before this allocation.
    8330  if(lastOffset < suballoc.offset)
    8331  {
    8332  // There is free space from lastOffset to suballoc.offset.
    8333  ++unusedRangeCount;
    8334  }
    8335 
    8336  // 2. Process this allocation.
    8337  // There is allocation with suballoc.offset, suballoc.size.
    8338  ++alloc2ndCount;
    8339  usedBytes += suballoc.size;
    8340 
    8341  // 3. Prepare for next iteration.
    8342  lastOffset = suballoc.offset + suballoc.size;
    8343  --nextAlloc2ndIndex;
    8344  }
    8345  // We are at the end.
    8346  else
    8347  {
    8348  if(lastOffset < size)
    8349  {
    8350  // There is free space from lastOffset to size.
    8351  ++unusedRangeCount;
    8352  }
    8353 
    8354  // End of loop.
    8355  lastOffset = size;
    8356  }
    8357  }
    8358  }
    8359 
    8360  const VkDeviceSize unusedBytes = size - usedBytes;
    8361  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
    8362 
    8363  // SECOND PASS
    8364  lastOffset = 0;
    8365 
    8366  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8367  {
    8368  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8369  size_t nextAlloc2ndIndex = 0;
    8370  while(lastOffset < freeSpace2ndTo1stEnd)
    8371  {
    8372  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8373  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8374  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8375  {
    8376  ++nextAlloc2ndIndex;
    8377  }
    8378 
    8379  // Found non-null allocation.
    8380  if(nextAlloc2ndIndex < suballoc2ndCount)
    8381  {
    8382  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8383 
    8384  // 1. Process free space before this allocation.
    8385  if(lastOffset < suballoc.offset)
    8386  {
    8387  // There is free space from lastOffset to suballoc.offset.
    8388  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8389  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8390  }
    8391 
    8392  // 2. Process this allocation.
    8393  // There is allocation with suballoc.offset, suballoc.size.
    8394  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    8395 
    8396  // 3. Prepare for next iteration.
    8397  lastOffset = suballoc.offset + suballoc.size;
    8398  ++nextAlloc2ndIndex;
    8399  }
    8400  // We are at the end.
    8401  else
    8402  {
    8403  if(lastOffset < freeSpace2ndTo1stEnd)
    8404  {
    8405  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8406  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    8407  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8408  }
    8409 
    8410  // End of loop.
    8411  lastOffset = freeSpace2ndTo1stEnd;
    8412  }
    8413  }
    8414  }
    8415 
    8416  nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8417  while(lastOffset < freeSpace1stTo2ndEnd)
    8418  {
    8419  // Find next non-null allocation or move nextAllocIndex to the end.
    8420  while(nextAlloc1stIndex < suballoc1stCount &&
    8421  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8422  {
    8423  ++nextAlloc1stIndex;
    8424  }
    8425 
    8426  // Found non-null allocation.
    8427  if(nextAlloc1stIndex < suballoc1stCount)
    8428  {
    8429  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8430 
    8431  // 1. Process free space before this allocation.
    8432  if(lastOffset < suballoc.offset)
    8433  {
    8434  // There is free space from lastOffset to suballoc.offset.
    8435  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8436  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8437  }
    8438 
    8439  // 2. Process this allocation.
    8440  // There is allocation with suballoc.offset, suballoc.size.
    8441  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    8442 
    8443  // 3. Prepare for next iteration.
    8444  lastOffset = suballoc.offset + suballoc.size;
    8445  ++nextAlloc1stIndex;
    8446  }
    8447  // We are at the end.
    8448  else
    8449  {
    8450  if(lastOffset < freeSpace1stTo2ndEnd)
    8451  {
    8452  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8453  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    8454  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8455  }
    8456 
    8457  // End of loop.
    8458  lastOffset = freeSpace1stTo2ndEnd;
    8459  }
    8460  }
    8461 
    8462  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8463  {
    8464  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8465  while(lastOffset < size)
    8466  {
    8467  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8468  while(nextAlloc2ndIndex != SIZE_MAX &&
    8469  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8470  {
    8471  --nextAlloc2ndIndex;
    8472  }
    8473 
    8474  // Found non-null allocation.
    8475  if(nextAlloc2ndIndex != SIZE_MAX)
    8476  {
    8477  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8478 
    8479  // 1. Process free space before this allocation.
    8480  if(lastOffset < suballoc.offset)
    8481  {
    8482  // There is free space from lastOffset to suballoc.offset.
    8483  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8484  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8485  }
    8486 
    8487  // 2. Process this allocation.
    8488  // There is allocation with suballoc.offset, suballoc.size.
    8489  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    8490 
    8491  // 3. Prepare for next iteration.
    8492  lastOffset = suballoc.offset + suballoc.size;
    8493  --nextAlloc2ndIndex;
    8494  }
    8495  // We are at the end.
    8496  else
    8497  {
    8498  if(lastOffset < size)
    8499  {
    8500  // There is free space from lastOffset to size.
    8501  const VkDeviceSize unusedRangeSize = size - lastOffset;
    8502  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8503  }
    8504 
    8505  // End of loop.
    8506  lastOffset = size;
    8507  }
    8508  }
    8509  }
    8510 
    8511  PrintDetailedMap_End(json);
    8512 }
    8513 #endif // #if VMA_STATS_STRING_ENABLED
    8514 
// Tries to find a place for a new allocation of `allocSize` bytes with
// `allocAlignment` in this linear (1st-fit at the ends) block.
// - upperAddress == true: allocate at the top of the block (2nd vector used
//   as a "double stack" growing downwards). Incompatible with ring-buffer mode.
// - upperAddress == false: first try appending after the end of the 1st
//   vector; if that fails, try wrapping around to the low end (2nd vector as
//   ring buffer), possibly making existing allocations lost if
//   canMakeOtherLost is true.
// On success fills *pAllocationRequest (offset, sumFreeSize, sumItemSize,
// itemsToMakeLostCount) and returns true; returns false if the allocation
// cannot be placed. Does not modify block state.
// NOTE(review): `strategy` is accepted but not consulted anywhere in this
// implementation - presumably reserved for future use; confirm with callers.
bool VmaBlockMetadata_Linear::CreateAllocationRequest(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    bool upperAddress,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(pAllocationRequest != VMA_NULL);
    VMA_HEAVY_ASSERT(Validate());

    const VkDeviceSize size = GetSize();
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(upperAddress)
    {
        // Upper-address allocations require the 2nd vector to act as a stack;
        // it cannot coexist with ring-buffer usage.
        if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
        {
            VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
            return false;
        }

        // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
        // Size check first so the unsigned subtractions below cannot underflow.
        if(allocSize > size)
        {
            return false;
        }
        VkDeviceSize resultBaseOffset = size - allocSize;
        if(!suballocations2nd.empty())
        {
            const VmaSuballocation& lastSuballoc = suballocations2nd.back();
            // 2nd.back() is the lowest-offset item of the upper stack; the new
            // allocation must fit entirely below it.
            resultBaseOffset = lastSuballoc.offset - allocSize;
            if(allocSize > lastSuballoc.offset)
            {
                return false;
            }
        }

        // Start from offset equal to end of free space.
        VkDeviceSize resultOffset = resultBaseOffset;

        // Apply VMA_DEBUG_MARGIN at the end.
        if(VMA_DEBUG_MARGIN > 0)
        {
            if(resultOffset < VMA_DEBUG_MARGIN)
            {
                return false;
            }
            resultOffset -= VMA_DEBUG_MARGIN;
        }

        // Apply alignment. Growing downwards, so align DOWN.
        resultOffset = VmaAlignDown(resultOffset, allocAlignment);

        // Check next suballocations from 2nd for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1 && !suballocations2nd.empty())
        {
            bool bufferImageGranularityConflict = false;
            // Iterate 2nd vector backwards: from lowest offset upwards in address order.
            for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
            {
                const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
                if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                // Conflict with a neighbor on the same granularity page:
                // push the offset down to a page boundary.
                resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
            }
        }

        // There is enough free space.
        const VkDeviceSize endOf1st = !suballocations1st.empty() ?
            suballocations1st.back().offset + suballocations1st.back().size :
            0;
        if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
        {
            // Check previous suballocations for BufferImageGranularity conflicts.
            // If conflict exists, allocation cannot be made here.
            if(bufferImageGranularity > 1)
            {
                for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
                {
                    const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
                    if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
                        {
                            return false;
                        }
                    }
                    else
                    {
                        // Already on next page.
                        break;
                    }
                }
            }

            // All tests passed: Success.
            pAllocationRequest->offset = resultOffset;
            pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
            pAllocationRequest->sumItemSize = 0;
            // pAllocationRequest->item unused.
            pAllocationRequest->itemsToMakeLostCount = 0;
            return true;
        }
    }
    else // !upperAddress
    {
        if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
        {
            // Try to allocate at the end of 1st vector.

            VkDeviceSize resultBaseOffset = 0;
            if(!suballocations1st.empty())
            {
                const VmaSuballocation& lastSuballoc = suballocations1st.back();
                resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
            }

            // Start from offset equal to beginning of free space.
            VkDeviceSize resultOffset = resultBaseOffset;

            // Apply VMA_DEBUG_MARGIN at the beginning.
            if(VMA_DEBUG_MARGIN > 0)
            {
                resultOffset += VMA_DEBUG_MARGIN;
            }

            // Apply alignment.
            resultOffset = VmaAlignUp(resultOffset, allocAlignment);

            // Check previous suballocations for BufferImageGranularity conflicts.
            // Make bigger alignment if necessary.
            if(bufferImageGranularity > 1 && !suballocations1st.empty())
            {
                bool bufferImageGranularityConflict = false;
                for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
                {
                    const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
                    if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                        {
                            bufferImageGranularityConflict = true;
                            break;
                        }
                    }
                    else
                        // Already on previous page.
                        break;
                }
                if(bufferImageGranularityConflict)
                {
                    resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
                }
            }

            // Free space ends where the upper stack begins (double stack) or
            // at the end of the block.
            const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
                suballocations2nd.back().offset : size;

            // There is enough free space at the end after alignment.
            if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
            {
                // Check next suballocations for BufferImageGranularity conflicts.
                // If conflict exists, allocation cannot be made here.
                if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
                {
                    for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
                    {
                        const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
                        if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                        {
                            if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                            {
                                return false;
                            }
                        }
                        else
                        {
                            // Already on previous page.
                            break;
                        }
                    }
                }

                // All tests passed: Success.
                pAllocationRequest->offset = resultOffset;
                pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
                pAllocationRequest->sumItemSize = 0;
                // pAllocationRequest->item unused.
                pAllocationRequest->itemsToMakeLostCount = 0;
                return true;
            }
        }

        // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
        // beginning of 1st vector as the end of free space.
        if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
        {
            VMA_ASSERT(!suballocations1st.empty());

            VkDeviceSize resultBaseOffset = 0;
            if(!suballocations2nd.empty())
            {
                const VmaSuballocation& lastSuballoc = suballocations2nd.back();
                resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
            }

            // Start from offset equal to beginning of free space.
            VkDeviceSize resultOffset = resultBaseOffset;

            // Apply VMA_DEBUG_MARGIN at the beginning.
            if(VMA_DEBUG_MARGIN > 0)
            {
                resultOffset += VMA_DEBUG_MARGIN;
            }

            // Apply alignment.
            resultOffset = VmaAlignUp(resultOffset, allocAlignment);

            // Check previous suballocations for BufferImageGranularity conflicts.
            // Make bigger alignment if necessary.
            if(bufferImageGranularity > 1 && !suballocations2nd.empty())
            {
                bool bufferImageGranularityConflict = false;
                for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
                {
                    const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
                    if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                        {
                            bufferImageGranularityConflict = true;
                            break;
                        }
                    }
                    else
                        // Already on previous page.
                        break;
                }
                if(bufferImageGranularityConflict)
                {
                    resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
                }
            }

            pAllocationRequest->itemsToMakeLostCount = 0;
            pAllocationRequest->sumItemSize = 0;
            size_t index1st = m_1stNullItemsBeginCount;

            if(canMakeOtherLost)
            {
                // Walk allocations at the beginning of 1st vector that collide
                // with [resultOffset, resultOffset+allocSize+margin) and check
                // whether each can be made lost; count them but do NOT modify
                // anything here (MakeRequestedAllocationsLost does that later).
                while(index1st < suballocations1st.size() &&
                    resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
                {
                    // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
                    const VmaSuballocation& suballoc = suballocations1st[index1st];
                    if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
                    {
                        // No problem.
                    }
                    else
                    {
                        VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
                        if(suballoc.hAllocation->CanBecomeLost() &&
                            suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++pAllocationRequest->itemsToMakeLostCount;
                            pAllocationRequest->sumItemSize += suballoc.size;
                        }
                        else
                        {
                            return false;
                        }
                    }
                    ++index1st;
                }

                // Check next suballocations for BufferImageGranularity conflicts.
                // If conflict exists, we must mark more allocations lost or fail.
                if(bufferImageGranularity > 1)
                {
                    while(index1st < suballocations1st.size())
                    {
                        const VmaSuballocation& suballoc = suballocations1st[index1st];
                        if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
                        {
                            if(suballoc.hAllocation != VK_NULL_HANDLE)
                            {
                                // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
                                // (Conservative: any allocation on the same page is made lost.)
                                if(suballoc.hAllocation->CanBecomeLost() &&
                                    suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                                {
                                    ++pAllocationRequest->itemsToMakeLostCount;
                                    pAllocationRequest->sumItemSize += suballoc.size;
                                }
                                else
                                {
                                    return false;
                                }
                            }
                        }
                        else
                        {
                            // Already on next page.
                            break;
                        }
                        ++index1st;
                    }
                }
            }

            // There is enough free space at the end after alignment.
            // NOTE(review): first clause uses strict '<' against size while the
            // second uses '<=' against the next allocation's offset - looks
            // intentionally asymmetric but worth confirming against callers.
            if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN < size) ||
                (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
            {
                // Check next suballocations for BufferImageGranularity conflicts.
                // If conflict exists, allocation cannot be made here.
                if(bufferImageGranularity > 1)
                {
                    for(size_t nextSuballocIndex = index1st;
                        nextSuballocIndex < suballocations1st.size();
                        nextSuballocIndex++)
                    {
                        const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
                        if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                        {
                            if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                            {
                                return false;
                            }
                        }
                        else
                        {
                            // Already on next page.
                            break;
                        }
                    }
                }

                // All tests passed: Success.
                pAllocationRequest->offset = resultOffset;
                pAllocationRequest->sumFreeSize =
                    (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
                    - resultBaseOffset
                    - pAllocationRequest->sumItemSize;
                // pAllocationRequest->item unused.
                return true;
            }
        }
    }

    return false;
}
    8887 
// Makes lost exactly pAllocationRequest->itemsToMakeLostCount allocations,
// scanning the 1st suballocation vector forward from its first non-null item.
// On success the freed items are marked FREE and counters updated; returns
// false if any required allocation refuses to become lost.
bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    if(pAllocationRequest->itemsToMakeLostCount == 0)
    {
        return true;
    }

    // Items to lose can only come from the 1st vector (ring-buffer head side).
    VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);

    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    size_t index1st = m_1stNullItemsBeginCount;
    size_t madeLostCount = 0;
    while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
    {
        VMA_ASSERT(index1st < suballocations1st.size());
        VmaSuballocation& suballoc = suballocations1st[index1st];
        // Already-free items are skipped; they don't count toward the quota.
        if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
            VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
            if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
            {
                suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                suballoc.hAllocation = VK_NULL_HANDLE;
                m_SumFreeSize += suballoc.size;
                ++m_1stNullItemsMiddleCount;
                ++madeLostCount;
            }
            else
            {
                return false;
            }
        }
        ++index1st;
    }

    CleanupAfterFree();
    //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().

    return true;
}
    8932 
    8933 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    8934 {
    8935  uint32_t lostAllocationCount = 0;
    8936 
    8937  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8938  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    8939  {
    8940  VmaSuballocation& suballoc = suballocations1st[i];
    8941  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    8942  suballoc.hAllocation->CanBecomeLost() &&
    8943  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    8944  {
    8945  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8946  suballoc.hAllocation = VK_NULL_HANDLE;
    8947  ++m_1stNullItemsMiddleCount;
    8948  m_SumFreeSize += suballoc.size;
    8949  ++lostAllocationCount;
    8950  }
    8951  }
    8952 
    8953  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8954  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    8955  {
    8956  VmaSuballocation& suballoc = suballocations2nd[i];
    8957  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    8958  suballoc.hAllocation->CanBecomeLost() &&
    8959  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    8960  {
    8961  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8962  suballoc.hAllocation = VK_NULL_HANDLE;
    8963  ++m_2ndNullItemsCount;
    8964  ++lostAllocationCount;
    8965  }
    8966  }
    8967 
    8968  if(lostAllocationCount)
    8969  {
    8970  CleanupAfterFree();
    8971  }
    8972 
    8973  return lostAllocationCount;
    8974 }
    8975 
    8976 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
    8977 {
    8978  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8979  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    8980  {
    8981  const VmaSuballocation& suballoc = suballocations1st[i];
    8982  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    8983  {
    8984  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    8985  {
    8986  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    8987  return VK_ERROR_VALIDATION_FAILED_EXT;
    8988  }
    8989  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    8990  {
    8991  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    8992  return VK_ERROR_VALIDATION_FAILED_EXT;
    8993  }
    8994  }
    8995  }
    8996 
    8997  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8998  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    8999  {
    9000  const VmaSuballocation& suballoc = suballocations2nd[i];
    9001  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    9002  {
    9003  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    9004  {
    9005  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    9006  return VK_ERROR_VALIDATION_FAILED_EXT;
    9007  }
    9008  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    9009  {
    9010  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    9011  return VK_ERROR_VALIDATION_FAILED_EXT;
    9012  }
    9013  }
    9014  }
    9015 
    9016  return VK_SUCCESS;
    9017 }
    9018 
// Commits a previously created allocation request into the metadata.
// upperAddress == true appends to the 2nd vector used as an upper stack
// (double-stack mode); otherwise the allocation either extends the end of the
// 1st vector or wraps around into the 2nd vector used as a ring buffer.
void VmaBlockMetadata_Linear::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };

    if(upperAddress)
    {
        // The two 2nd-vector modes are mutually exclusive for a block's lifetime.
        VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
            "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
        SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
        suballocations2nd.push_back(newSuballoc);
        m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
    }
    else
    {
        SuballocationVectorType& suballocations1st = AccessSuballocations1st();

        // First allocation.
        if(suballocations1st.empty())
        {
            suballocations1st.push_back(newSuballoc);
        }
        else
        {
            // New allocation at the end of 1st vector.
            if(request.offset >= suballocations1st.back().offset + suballocations1st.back().size)
            {
                // Check if it fits before the end of the block.
                VMA_ASSERT(request.offset + allocSize <= GetSize());
                suballocations1st.push_back(newSuballoc);
            }
            // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
            else if(request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset)
            {
                SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

                switch(m_2ndVectorMode)
                {
                case SECOND_VECTOR_EMPTY:
                    // First allocation from second part ring buffer.
                    VMA_ASSERT(suballocations2nd.empty());
                    m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
                    break;
                case SECOND_VECTOR_RING_BUFFER:
                    // 2-part ring buffer is already started.
                    VMA_ASSERT(!suballocations2nd.empty());
                    break;
                case SECOND_VECTOR_DOUBLE_STACK:
                    VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
                    break;
                default:
                    VMA_ASSERT(0);
                }

                suballocations2nd.push_back(newSuballoc);
            }
            else
            {
                // Offset matches neither the end of 1st nor the wrap-around region.
                VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
            }
        }
    }

    m_SumFreeSize -= newSuballoc.size;
}
    9088 
    9089 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
    9090 {
    9091  FreeAtOffset(allocation->GetOffset());
    9092 }
    9093 
// Frees the suballocation starting at `offset`. Cheap O(1) cases are checked
// first (first live item of 1st vector, last item of 2nd or 1st vector);
// otherwise a binary search over the appropriate sorted vector is used.
// Every successful path ends in CleanupAfterFree() to restore invariants.
void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(!suballocations1st.empty())
    {
        // First allocation: Mark it as next empty at the beginning.
        // NOTE(review): assumes m_1stNullItemsBeginCount indexes a valid item
        // whenever the vector is non-empty — maintained by CleanupAfterFree; confirm.
        VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
        if(firstSuballoc.offset == offset)
        {
            firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
            firstSuballoc.hAllocation = VK_NULL_HANDLE;
            m_SumFreeSize += firstSuballoc.size;
            ++m_1stNullItemsBeginCount;
            CleanupAfterFree();
            return;
        }
    }

    // Last allocation in 2-part ring buffer or top of upper stack (same logic).
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        VmaSuballocation& lastSuballoc = suballocations2nd.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations2nd.pop_back();
            CleanupAfterFree();
            return;
        }
    }
    // Last allocation in 1st vector.
    else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
    {
        VmaSuballocation& lastSuballoc = suballocations1st.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations1st.pop_back();
            CleanupAfterFree();
            return;
        }
    }

    // Item from the middle of 1st vector.
    {
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
            suballocations1st.begin() + m_1stNullItemsBeginCount,
            suballocations1st.end(),
            refSuballoc);
        if(it != suballocations1st.end())
        {
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_1stNullItemsMiddleCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
    {
        // Item from the middle of 2nd vector.
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        // Ring buffer is kept sorted by ascending offset, double stack by descending,
        // hence the different comparators.
        SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
            VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
            VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
        if(it != suballocations2nd.end())
        {
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_2ndNullItemsCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
}
    9182 
    9183 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
    9184 {
    9185  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
    9186  const size_t suballocCount = AccessSuballocations1st().size();
    9187  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
    9188 }
    9189 
// Restores internal invariants after suballocations have been freed or made
// lost: trims runs of null items, optionally compacts the 1st vector, and
// swaps the roles of the two vectors once the 1st one drains completely.
void VmaBlockMetadata_Linear::CleanupAfterFree()
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(IsEmpty())
    {
        // Everything was freed: reset to pristine state.
        suballocations1st.clear();
        suballocations2nd.clear();
        m_1stNullItemsBeginCount = 0;
        m_1stNullItemsMiddleCount = 0;
        m_2ndNullItemsCount = 0;
        m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    }
    else
    {
        const size_t suballoc1stCount = suballocations1st.size();
        const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
        VMA_ASSERT(nullItem1stCount <= suballoc1stCount);

        // Find more null items at the beginning of 1st vector.
        while(m_1stNullItemsBeginCount < suballoc1stCount &&
            suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
        {
            ++m_1stNullItemsBeginCount;
            --m_1stNullItemsMiddleCount;
        }

        // Find more null items at the end of 1st vector.
        while(m_1stNullItemsMiddleCount > 0 &&
            suballocations1st.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_1stNullItemsMiddleCount;
            suballocations1st.pop_back();
        }

        // Find more null items at the end of 2nd vector.
        while(m_2ndNullItemsCount > 0 &&
            suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_2ndNullItemsCount;
            suballocations2nd.pop_back();
        }

        if(ShouldCompact1st())
        {
            // Compact 1st vector in place: shift all non-null items to the
            // front, then drop the tail that now contains only stale entries.
            const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
            size_t srcIndex = m_1stNullItemsBeginCount;
            for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
            {
                while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
                {
                    ++srcIndex;
                }
                if(dstIndex != srcIndex)
                {
                    suballocations1st[dstIndex] = suballocations1st[srcIndex];
                }
                ++srcIndex;
            }
            suballocations1st.resize(nonNullItemCount);
            m_1stNullItemsBeginCount = 0;
            m_1stNullItemsMiddleCount = 0;
        }

        // 2nd vector became empty.
        if(suballocations2nd.empty())
        {
            m_2ndVectorMode = SECOND_VECTOR_EMPTY;
        }

        // 1st vector became empty.
        if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
        {
            suballocations1st.clear();
            m_1stNullItemsBeginCount = 0;

            if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
            {
                // Swap 1st with 2nd. Now 2nd is empty.
                m_2ndVectorMode = SECOND_VECTOR_EMPTY;
                m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
                // Skip leading nulls of the vector that becomes the new 1st.
                while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
                    suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
                {
                    ++m_1stNullItemsBeginCount;
                    --m_1stNullItemsMiddleCount;
                }
                m_2ndNullItemsCount = 0;
                // Actual swap of the two underlying vectors.
                m_1stVectorIndex ^= 1;
            }
        }
    }

    VMA_HEAVY_ASSERT(Validate());
}
    9286 
    9287 
    9289 // class VmaBlockMetadata_Buddy
    9290 
// Constructs empty buddy metadata. The node tree (m_Root) is created later in
// Init(). m_FreeCount starts at 1 — presumably counting the whole block as one
// free range before Init() builds the root node; confirm against Init().
VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_Root(VMA_NULL),
    m_AllocationCount(0),
    m_FreeCount(1),
    m_SumFreeSize(0)
{
    // Zero-fill the per-level free lists so every level starts empty.
    memset(m_FreeList, 0, sizeof(m_FreeList));
}
    9300 
    9301 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
    9302 {
    9303  DeleteNode(m_Root);
    9304 }
    9305 
    9306 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
    9307 {
    9308  VmaBlockMetadata::Init(size);
    9309 
    9310  m_UsableSize = VmaPrevPow2(size);
    9311  m_SumFreeSize = m_UsableSize;
    9312 
    9313  // Calculate m_LevelCount.
    9314  m_LevelCount = 1;
    9315  while(m_LevelCount < MAX_LEVELS &&
    9316  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
    9317  {
    9318  ++m_LevelCount;
    9319  }
    9320 
    9321  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
    9322  rootNode->offset = 0;
    9323  rootNode->type = Node::TYPE_FREE;
    9324  rootNode->parent = VMA_NULL;
    9325  rootNode->buddy = VMA_NULL;
    9326 
    9327  m_Root = rootNode;
    9328  AddToFreeListFront(0, rootNode);
    9329 }
    9330 
// Checks internal consistency: the node tree, the per-level doubly-linked
// free lists, and the cached counters. Returns false (via VMA_VALIDATE) on
// the first violated invariant.
bool VmaBlockMetadata_Buddy::Validate() const
{
    // Validate tree.
    ValidationContext ctx;
    if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
    {
        VMA_VALIDATE(false && "ValidateNode failed.");
    }
    // Cached counters must match what the tree walk computed.
    VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
    VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);

    // Validate free node lists.
    for(uint32_t level = 0; level < m_LevelCount; ++level)
    {
        // Front node, if present, must have no predecessor.
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
            m_FreeList[level].front->free.prev == VMA_NULL);

        for(Node* node = m_FreeList[level].front;
            node != VMA_NULL;
            node = node->free.next)
        {
            VMA_VALIDATE(node->type == Node::TYPE_FREE);

            if(node->free.next == VMA_NULL)
            {
                // Last node must be the cached back pointer.
                VMA_VALIDATE(m_FreeList[level].back == node);
            }
            else
            {
                // Doubly-linked list back-pointer consistency.
                VMA_VALIDATE(node->free.next->free.prev == node);
            }
        }
    }

    // Validate that free lists at higher levels are empty.
    for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
    {
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
    }

    return true;
}
    9373 
    9374 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
    9375 {
    9376  for(uint32_t level = 0; level < m_LevelCount; ++level)
    9377  {
    9378  if(m_FreeList[level].front != VMA_NULL)
    9379  {
    9380  return LevelToNodeSize(level);
    9381  }
    9382  }
    9383  return 0;
    9384 }
    9385 
    9386 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    9387 {
    9388  const VkDeviceSize unusableSize = GetUnusableSize();
    9389 
    9390  outInfo.blockCount = 1;
    9391 
    9392  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
    9393  outInfo.usedBytes = outInfo.unusedBytes = 0;
    9394 
    9395  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
    9396  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
    9397  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
    9398 
    9399  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
    9400 
    9401  if(unusableSize > 0)
    9402  {
    9403  ++outInfo.unusedRangeCount;
    9404  outInfo.unusedBytes += unusableSize;
    9405  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
    9406  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
    9407  }
    9408 }
    9409 
    9410 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
    9411 {
    9412  const VkDeviceSize unusableSize = GetUnusableSize();
    9413 
    9414  inoutStats.size += GetSize();
    9415  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
    9416  inoutStats.allocationCount += m_AllocationCount;
    9417  inoutStats.unusedRangeCount += m_FreeCount;
    9418  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    9419 
    9420  if(unusableSize > 0)
    9421  {
    9422  ++inoutStats.unusedRangeCount;
    9423  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
    9424  }
    9425 }
    9426 
    9427 #if VMA_STATS_STRING_ENABLED
    9428 
    9429 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
    9430 {
    9431  // TODO optimize
    9432  VmaStatInfo stat;
    9433  CalcAllocationStatInfo(stat);
    9434 
    9435  PrintDetailedMap_Begin(
    9436  json,
    9437  stat.unusedBytes,
    9438  stat.allocationCount,
    9439  stat.unusedRangeCount);
    9440 
    9441  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
    9442 
    9443  const VkDeviceSize unusableSize = GetUnusableSize();
    9444  if(unusableSize > 0)
    9445  {
    9446  PrintDetailedMap_UnusedRange(json,
    9447  m_UsableSize, // offset
    9448  unusableSize); // size
    9449  }
    9450 
    9451  PrintDetailedMap_End(json);
    9452 }
    9453 
    9454 #endif // #if VMA_STATS_STRING_ENABLED
    9455 
    9456 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
    9457  uint32_t currentFrameIndex,
    9458  uint32_t frameInUseCount,
    9459  VkDeviceSize bufferImageGranularity,
    9460  VkDeviceSize allocSize,
    9461  VkDeviceSize allocAlignment,
    9462  bool upperAddress,
    9463  VmaSuballocationType allocType,
    9464  bool canMakeOtherLost,
    9465  uint32_t strategy,
    9466  VmaAllocationRequest* pAllocationRequest)
    9467 {
    9468  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
    9469 
    9470  // Simple way to respect bufferImageGranularity. May be optimized some day.
    9471  // Whenever it might be an OPTIMAL image...
    9472  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
    9473  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    9474  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
    9475  {
    9476  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
    9477  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
    9478  }
    9479 
    9480  if(allocSize > m_UsableSize)
    9481  {
    9482  return false;
    9483  }
    9484 
    9485  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    9486  for(uint32_t level = targetLevel + 1; level--; )
    9487  {
    9488  for(Node* freeNode = m_FreeList[level].front;
    9489  freeNode != VMA_NULL;
    9490  freeNode = freeNode->free.next)
    9491  {
    9492  if(freeNode->offset % allocAlignment == 0)
    9493  {
    9494  pAllocationRequest->offset = freeNode->offset;
    9495  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
    9496  pAllocationRequest->sumItemSize = 0;
    9497  pAllocationRequest->itemsToMakeLostCount = 0;
    9498  pAllocationRequest->customData = (void*)(uintptr_t)level;
    9499  return true;
    9500  }
    9501  }
    9502  }
    9503 
    9504  return false;
    9505 }
    9506 
    9507 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
    9508  uint32_t currentFrameIndex,
    9509  uint32_t frameInUseCount,
    9510  VmaAllocationRequest* pAllocationRequest)
    9511 {
    9512  /*
    9513  Lost allocations are not supported in buddy allocator at the moment.
    9514  Support might be added in the future.
    9515  */
    9516  return pAllocationRequest->itemsToMakeLostCount == 0;
    9517 }
    9518 
    9519 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    9520 {
    9521  /*
    9522  Lost allocations are not supported in buddy allocator at the moment.
    9523  Support might be added in the future.
    9524  */
    9525  return 0;
    9526 }
    9527 
// Commits an allocation request created by CreateAllocationRequest: locates
// the free node at the level stored in request.customData, splits free nodes
// downward until the target level is reached, and converts the final node
// into an allocation node.
void VmaBlockMetadata_Buddy::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    // Level at which CreateAllocationRequest found the free node.
    uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;

    // Find the free node with the requested offset in that level's free list.
    Node* currNode = m_FreeList[currLevel].front;
    VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    while(currNode->offset != request.offset)
    {
        currNode = currNode->free.next;
        VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    }

    // Go down, splitting free nodes.
    while(currLevel < targetLevel)
    {
        // currNode is already first free node at currLevel.
        // Remove it from list of free nodes at this currLevel.
        RemoveFromFreeList(currLevel, currNode);

        const uint32_t childrenLevel = currLevel + 1;

        // Create two free sub-nodes.
        Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
        Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();

        leftChild->offset = currNode->offset;
        leftChild->type = Node::TYPE_FREE;
        leftChild->parent = currNode;
        leftChild->buddy = rightChild;

        rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
        rightChild->type = Node::TYPE_FREE;
        rightChild->parent = currNode;
        rightChild->buddy = leftChild;

        // Convert current currNode to split type.
        currNode->type = Node::TYPE_SPLIT;
        currNode->split.leftChild = leftChild;

        // Add child nodes to free list. Order is important!
        // The left child must end up at the front so the next iteration picks it.
        AddToFreeListFront(childrenLevel, rightChild);
        AddToFreeListFront(childrenLevel, leftChild);

        ++m_FreeCount;
        //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
        ++currLevel;
        currNode = m_FreeList[currLevel].front;

        /*
        We can be sure that currNode, as left child of node previously split,
        also fullfills the alignment requirement.
        */
    }

    // Remove from free list.
    VMA_ASSERT(currLevel == targetLevel &&
        currNode != VMA_NULL &&
        currNode->type == Node::TYPE_FREE);
    RemoveFromFreeList(currLevel, currNode);

    // Convert to allocation node.
    currNode->type = Node::TYPE_ALLOCATION;
    currNode->allocation.alloc = hAllocation;

    ++m_AllocationCount;
    --m_FreeCount;
    m_SumFreeSize -= allocSize;
}
    9602 
    9603 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
    9604 {
    9605  if(node->type == Node::TYPE_SPLIT)
    9606  {
    9607  DeleteNode(node->split.leftChild->buddy);
    9608  DeleteNode(node->split.leftChild);
    9609  }
    9610 
    9611  vma_delete(GetAllocationCallbacks(), node);
    9612 }
    9613 
// Recursively validates one node of the buddy tree against its parent and
// accumulates allocation count and free size into ctx. Returns false (via
// VMA_VALIDATE) on the first violated invariant.
bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
{
    VMA_VALIDATE(level < m_LevelCount);
    VMA_VALIDATE(curr->parent == parent);
    // Only the root node has no buddy.
    VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
    VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
    switch(curr->type)
    {
    case Node::TYPE_FREE:
        // curr->free.prev, next are validated separately.
        ctx.calculatedSumFreeSize += levelNodeSize;
        ++ctx.calculatedFreeCount;
        break;
    case Node::TYPE_ALLOCATION:
        ++ctx.calculatedAllocationCount;
        // The node's unused tail (node size minus allocation size) counts as free.
        // NOTE(review): alloc is dereferenced here before the null-handle check
        // on the next line — confirm the order is intended.
        ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
        VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
        break;
    case Node::TYPE_SPLIT:
        {
            const uint32_t childrenLevel = level + 1;
            const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
            const Node* const leftChild = curr->split.leftChild;
            VMA_VALIDATE(leftChild != VMA_NULL);
            VMA_VALIDATE(leftChild->offset == curr->offset);
            if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
            {
                VMA_VALIDATE(false && "ValidateNode for left child failed.");
            }
            const Node* const rightChild = leftChild->buddy;
            VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
            if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
            {
                VMA_VALIDATE(false && "ValidateNode for right child failed.");
            }
        }
        break;
    default:
        return false;
    }

    return true;
}
    9657 
    9658 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
    9659 {
    9660  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
    9661  uint32_t level = 0;
    9662  VkDeviceSize currLevelNodeSize = m_UsableSize;
    9663  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
    9664  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
    9665  {
    9666  ++level;
    9667  currLevelNodeSize = nextLevelNodeSize;
    9668  nextLevelNodeSize = currLevelNodeSize >> 1;
    9669  }
    9670  return level;
    9671 }
    9672 
// Frees the allocation at the given offset: descends the tree from the root
// to the allocation node, marks it free, and repeatedly merges it with its
// buddy while the buddy is also free.
void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
{
    // Find node and level.
    Node* node = m_Root;
    VkDeviceSize nodeOffset = 0;
    uint32_t level = 0;
    VkDeviceSize levelNodeSize = LevelToNodeSize(0);
    while(node->type == Node::TYPE_SPLIT)
    {
        const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
        if(offset < nodeOffset + nextLevelSize)
        {
            // Offset falls into the left half.
            node = node->split.leftChild;
        }
        else
        {
            node = node->split.leftChild->buddy;
            nodeOffset += nextLevelSize;
        }
        ++level;
        levelNodeSize = nextLevelSize;
    }

    VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
    VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);

    ++m_FreeCount;
    --m_AllocationCount;
    // NOTE(review): the assert above permits alloc == VK_NULL_HANDLE, yet
    // GetSize() is called on it here — confirm all callers pass a valid handle.
    m_SumFreeSize += alloc->GetSize();

    node->type = Node::TYPE_FREE;

    // Join free nodes if possible.
    while(level > 0 && node->buddy->type == Node::TYPE_FREE)
    {
        RemoveFromFreeList(level, node->buddy);
        Node* const parent = node->parent;

        vma_delete(GetAllocationCallbacks(), node->buddy);
        vma_delete(GetAllocationCallbacks(), node);
        parent->type = Node::TYPE_FREE;

        node = parent;
        --level;
        //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
        --m_FreeCount;
    }

    AddToFreeListFront(level, node);
}
    9723 
    9724 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
    9725 {
    9726  switch(node->type)
    9727  {
    9728  case Node::TYPE_FREE:
    9729  ++outInfo.unusedRangeCount;
    9730  outInfo.unusedBytes += levelNodeSize;
    9731  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
    9732  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, levelNodeSize);
    9733  break;
    9734  case Node::TYPE_ALLOCATION:
    9735  {
    9736  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
    9737  ++outInfo.allocationCount;
    9738  outInfo.usedBytes += allocSize;
    9739  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
    9740  outInfo.allocationSizeMin = VMA_MAX(outInfo.allocationSizeMin, allocSize);
    9741 
    9742  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
    9743  if(unusedRangeSize > 0)
    9744  {
    9745  ++outInfo.unusedRangeCount;
    9746  outInfo.unusedBytes += unusedRangeSize;
    9747  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9748  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9749  }
    9750  }
    9751  break;
    9752  case Node::TYPE_SPLIT:
    9753  {
    9754  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
    9755  const Node* const leftChild = node->split.leftChild;
    9756  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
    9757  const Node* const rightChild = leftChild->buddy;
    9758  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
    9759  }
    9760  break;
    9761  default:
    9762  VMA_ASSERT(0);
    9763  }
    9764 }
    9765 
    9766 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
    9767 {
    9768  VMA_ASSERT(node->type == Node::TYPE_FREE);
    9769 
    9770  // List is empty.
    9771  Node* const frontNode = m_FreeList[level].front;
    9772  if(frontNode == VMA_NULL)
    9773  {
    9774  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
    9775  node->free.prev = node->free.next = VMA_NULL;
    9776  m_FreeList[level].front = m_FreeList[level].back = node;
    9777  }
    9778  else
    9779  {
    9780  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
    9781  node->free.prev = VMA_NULL;
    9782  node->free.next = frontNode;
    9783  frontNode->free.prev = node;
    9784  m_FreeList[level].front = node;
    9785  }
    9786 }
    9787 
    9788 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
    9789 {
    9790  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
    9791 
    9792  // It is at the front.
    9793  if(node->free.prev == VMA_NULL)
    9794  {
    9795  VMA_ASSERT(m_FreeList[level].front == node);
    9796  m_FreeList[level].front = node->free.next;
    9797  }
    9798  else
    9799  {
    9800  Node* const prevFreeNode = node->free.prev;
    9801  VMA_ASSERT(prevFreeNode->free.next == node);
    9802  prevFreeNode->free.next = node->free.next;
    9803  }
    9804 
    9805  // It is at the back.
    9806  if(node->free.next == VMA_NULL)
    9807  {
    9808  VMA_ASSERT(m_FreeList[level].back == node);
    9809  m_FreeList[level].back = node->free.prev;
    9810  }
    9811  else
    9812  {
    9813  Node* const nextFreeNode = node->free.next;
    9814  VMA_ASSERT(nextFreeNode->free.prev == node);
    9815  nextFreeNode->free.prev = node->free.prev;
    9816  }
    9817 }
    9818 
    9819 #if VMA_STATS_STRING_ENABLED
    9820 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
    9821 {
    9822  switch(node->type)
    9823  {
    9824  case Node::TYPE_FREE:
    9825  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
    9826  break;
    9827  case Node::TYPE_ALLOCATION:
    9828  {
    9829  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
    9830  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
    9831  if(allocSize < levelNodeSize)
    9832  {
    9833  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
    9834  }
    9835  }
    9836  break;
    9837  case Node::TYPE_SPLIT:
    9838  {
    9839  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
    9840  const Node* const leftChild = node->split.leftChild;
    9841  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
    9842  const Node* const rightChild = leftChild->buddy;
    9843  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
    9844  }
    9845  break;
    9846  default:
    9847  VMA_ASSERT(0);
    9848  }
    9849 }
    9850 #endif // #if VMA_STATS_STRING_ENABLED
    9851 
    9852 
    9854 // class VmaDeviceMemoryBlock
    9855 
// Constructs the block in an uninitialized state; Init() must be called before use
// (see the VMA_ASSERT(m_hMemory == VK_NULL_HANDLE) guard there).
// The hAllocator parameter is not used by this constructor.
VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    m_pMetadata(VMA_NULL),
    m_MemoryTypeIndex(UINT32_MAX), // Sentinel: no memory type chosen yet.
    m_Id(0),
    m_hMemory(VK_NULL_HANDLE),
    m_MapCount(0),
    m_pMappedData(VMA_NULL)
{
}
    9865 
    9866 void VmaDeviceMemoryBlock::Init(
    9867  VmaAllocator hAllocator,
    9868  uint32_t newMemoryTypeIndex,
    9869  VkDeviceMemory newMemory,
    9870  VkDeviceSize newSize,
    9871  uint32_t id,
    9872  uint32_t algorithm)
    9873 {
    9874  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    9875 
    9876  m_MemoryTypeIndex = newMemoryTypeIndex;
    9877  m_Id = id;
    9878  m_hMemory = newMemory;
    9879 
    9880  switch(algorithm)
    9881  {
    9883  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
    9884  break;
    9886  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
    9887  break;
    9888  default:
    9889  VMA_ASSERT(0);
    9890  // Fall-through.
    9891  case 0:
    9892  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
    9893  }
    9894  m_pMetadata->Init(newSize);
    9895 }
    9896 
    9897 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
    9898 {
    9899  // This is the most important assert in the entire library.
    9900  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
    9901  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
    9902 
    9903  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    9904  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
    9905  m_hMemory = VK_NULL_HANDLE;
    9906 
    9907  vma_delete(allocator, m_pMetadata);
    9908  m_pMetadata = VMA_NULL;
    9909 }
    9910 
    9911 bool VmaDeviceMemoryBlock::Validate() const
    9912 {
    9913  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
    9914  (m_pMetadata->GetSize() != 0));
    9915 
    9916  return m_pMetadata->Validate();
    9917 }
    9918 
    9919 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
    9920 {
    9921  void* pData = nullptr;
    9922  VkResult res = Map(hAllocator, 1, &pData);
    9923  if(res != VK_SUCCESS)
    9924  {
    9925  return res;
    9926  }
    9927 
    9928  res = m_pMetadata->CheckCorruption(pData);
    9929 
    9930  Unmap(hAllocator, 1);
    9931 
    9932  return res;
    9933 }
    9934 
    9935 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
    9936 {
    9937  if(count == 0)
    9938  {
    9939  return VK_SUCCESS;
    9940  }
    9941 
    9942  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    9943  if(m_MapCount != 0)
    9944  {
    9945  m_MapCount += count;
    9946  VMA_ASSERT(m_pMappedData != VMA_NULL);
    9947  if(ppData != VMA_NULL)
    9948  {
    9949  *ppData = m_pMappedData;
    9950  }
    9951  return VK_SUCCESS;
    9952  }
    9953  else
    9954  {
    9955  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
    9956  hAllocator->m_hDevice,
    9957  m_hMemory,
    9958  0, // offset
    9959  VK_WHOLE_SIZE,
    9960  0, // flags
    9961  &m_pMappedData);
    9962  if(result == VK_SUCCESS)
    9963  {
    9964  if(ppData != VMA_NULL)
    9965  {
    9966  *ppData = m_pMappedData;
    9967  }
    9968  m_MapCount = count;
    9969  }
    9970  return result;
    9971  }
    9972 }
    9973 
    9974 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
    9975 {
    9976  if(count == 0)
    9977  {
    9978  return;
    9979  }
    9980 
    9981  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    9982  if(m_MapCount >= count)
    9983  {
    9984  m_MapCount -= count;
    9985  if(m_MapCount == 0)
    9986  {
    9987  m_pMappedData = VMA_NULL;
    9988  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
    9989  }
    9990  }
    9991  else
    9992  {
    9993  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
    9994  }
    9995 }
    9996 
    9997 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
    9998 {
    9999  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    10000  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
    10001 
    10002  void* pData;
    10003  VkResult res = Map(hAllocator, 1, &pData);
    10004  if(res != VK_SUCCESS)
    10005  {
    10006  return res;
    10007  }
    10008 
    10009  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
    10010  VmaWriteMagicValue(pData, allocOffset + allocSize);
    10011 
    10012  Unmap(hAllocator, 1);
    10013 
    10014  return VK_SUCCESS;
    10015 }
    10016 
    10017 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
    10018 {
    10019  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    10020  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
    10021 
    10022  void* pData;
    10023  VkResult res = Map(hAllocator, 1, &pData);
    10024  if(res != VK_SUCCESS)
    10025  {
    10026  return res;
    10027  }
    10028 
    10029  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
    10030  {
    10031  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
    10032  }
    10033  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
    10034  {
    10035  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
    10036  }
    10037 
    10038  Unmap(hAllocator, 1);
    10039 
    10040  return VK_SUCCESS;
    10041 }
    10042 
    10043 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
    10044  const VmaAllocator hAllocator,
    10045  const VmaAllocation hAllocation,
    10046  VkBuffer hBuffer)
    10047 {
    10048  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
    10049  hAllocation->GetBlock() == this);
    10050  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    10051  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    10052  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
    10053  hAllocator->m_hDevice,
    10054  hBuffer,
    10055  m_hMemory,
    10056  hAllocation->GetOffset());
    10057 }
    10058 
    10059 VkResult VmaDeviceMemoryBlock::BindImageMemory(
    10060  const VmaAllocator hAllocator,
    10061  const VmaAllocation hAllocation,
    10062  VkImage hImage)
    10063 {
    10064  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
    10065  hAllocation->GetBlock() == this);
    10066  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    10067  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    10068  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
    10069  hAllocator->m_hDevice,
    10070  hImage,
    10071  m_hMemory,
    10072  hAllocation->GetOffset());
    10073 }
    10074 
    10075 static void InitStatInfo(VmaStatInfo& outInfo)
    10076 {
    10077  memset(&outInfo, 0, sizeof(outInfo));
    10078  outInfo.allocationSizeMin = UINT64_MAX;
    10079  outInfo.unusedRangeSizeMin = UINT64_MAX;
    10080 }
    10081 
    10082 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
    10083 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
    10084 {
    10085  inoutInfo.blockCount += srcInfo.blockCount;
    10086  inoutInfo.allocationCount += srcInfo.allocationCount;
    10087  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
    10088  inoutInfo.usedBytes += srcInfo.usedBytes;
    10089  inoutInfo.unusedBytes += srcInfo.unusedBytes;
    10090  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
    10091  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
    10092  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
    10093  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
    10094 }
    10095 
    10096 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
    10097 {
    10098  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
    10099  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
    10100  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
    10101  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
    10102 }
    10103 
// Constructs a custom pool. All configuration is forwarded into the embedded
// VmaBlockVector; the pool itself adds only an id.
VmaPool_T::VmaPool_T(
    VmaAllocator hAllocator,
    const VmaPoolCreateInfo& createInfo,
    VkDeviceSize preferredBlockSize) :
    m_BlockVector(
        hAllocator,
        createInfo.memoryTypeIndex,
        // blockSize == 0 means "let the allocator choose" - use preferredBlockSize.
        createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
        createInfo.minBlockCount,
        createInfo.maxBlockCount,
        // Ignoring granularity collapses it to 1 (no alignment between buffers/images).
        (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
        createInfo.frameInUseCount,
        true, // isCustomPool
        createInfo.blockSize != 0, // explicitBlockSize
        createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
    m_Id(0)
{
}
    10122 
VmaPool_T::~VmaPool_T()
{
    // Intentionally empty: the m_BlockVector member's destructor releases all blocks.
}
    10126 
    10127 #if VMA_STATS_STRING_ENABLED
    10128 
    10129 #endif // #if VMA_STATS_STRING_ENABLED
    10130 
// Stores configuration for a vector of memory blocks of one memory type.
// No blocks are created here - see CreateMinBlocks() and Allocate().
VmaBlockVector::VmaBlockVector(
    VmaAllocator hAllocator,
    uint32_t memoryTypeIndex,
    VkDeviceSize preferredBlockSize,
    size_t minBlockCount,
    size_t maxBlockCount,
    VkDeviceSize bufferImageGranularity,
    uint32_t frameInUseCount,
    bool isCustomPool,
    bool explicitBlockSize,
    uint32_t algorithm) :
    m_hAllocator(hAllocator),
    m_MemoryTypeIndex(memoryTypeIndex),
    m_PreferredBlockSize(preferredBlockSize),
    m_MinBlockCount(minBlockCount),
    m_MaxBlockCount(maxBlockCount),
    m_BufferImageGranularity(bufferImageGranularity),
    m_FrameInUseCount(frameInUseCount),
    m_IsCustomPool(isCustomPool),
    m_ExplicitBlockSize(explicitBlockSize),
    m_Algorithm(algorithm), // VMA_POOL_CREATE_*_ALGORITHM_BIT or 0 for default.
    m_HasEmptyBlock(false),
    m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    m_pDefragmentator(VMA_NULL),
    m_NextBlockId(0)
{
}
    10158 
    10159 VmaBlockVector::~VmaBlockVector()
    10160 {
    10161  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
    10162 
    10163  for(size_t i = m_Blocks.size(); i--; )
    10164  {
    10165  m_Blocks[i]->Destroy(m_hAllocator);
    10166  vma_delete(m_hAllocator, m_Blocks[i]);
    10167  }
    10168 }
    10169 
    10170 VkResult VmaBlockVector::CreateMinBlocks()
    10171 {
    10172  for(size_t i = 0; i < m_MinBlockCount; ++i)
    10173  {
    10174  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
    10175  if(res != VK_SUCCESS)
    10176  {
    10177  return res;
    10178  }
    10179  }
    10180  return VK_SUCCESS;
    10181 }
    10182 
    10183 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
    10184 {
    10185  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10186 
    10187  const size_t blockCount = m_Blocks.size();
    10188 
    10189  pStats->size = 0;
    10190  pStats->unusedSize = 0;
    10191  pStats->allocationCount = 0;
    10192  pStats->unusedRangeCount = 0;
    10193  pStats->unusedRangeSizeMax = 0;
    10194  pStats->blockCount = blockCount;
    10195 
    10196  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    10197  {
    10198  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    10199  VMA_ASSERT(pBlock);
    10200  VMA_HEAVY_ASSERT(pBlock->Validate());
    10201  pBlock->m_pMetadata->AddPoolStats(*pStats);
    10202  }
    10203 }
    10204 
    10205 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
    10206 {
    10207  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    10208  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
    10209  (VMA_DEBUG_MARGIN > 0) &&
    10210  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
    10211 }
    10212 
    10213 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
    10214 
    10215 VkResult VmaBlockVector::Allocate(
    10216  VmaPool hCurrentPool,
    10217  uint32_t currentFrameIndex,
    10218  VkDeviceSize size,
    10219  VkDeviceSize alignment,
    10220  const VmaAllocationCreateInfo& createInfo,
    10221  VmaSuballocationType suballocType,
    10222  VmaAllocation* pAllocation)
    10223 {
    10224  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    10225  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
    10226  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    10227  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
    10228  const bool canCreateNewBlock =
    10229  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
    10230  (m_Blocks.size() < m_MaxBlockCount);
    10231  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
    10232 
    10233  // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer.
    10234  // Which in turn is available only when maxBlockCount = 1.
    10235  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
    10236  {
    10237  canMakeOtherLost = false;
    10238  }
    10239 
    10240  // Upper address can only be used with linear allocator and within single memory block.
    10241  if(isUpperAddress &&
    10242  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
    10243  {
    10244  return VK_ERROR_FEATURE_NOT_PRESENT;
    10245  }
    10246 
    10247  // Validate strategy.
    10248  switch(strategy)
    10249  {
    10250  case 0:
    10252  break;
    10256  break;
    10257  default:
    10258  return VK_ERROR_FEATURE_NOT_PRESENT;
    10259  }
    10260 
    10261  // Early reject: requested allocation size is larger that maximum block size for this block vector.
    10262  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
    10263  {
    10264  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    10265  }
    10266 
    10267  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10268 
    10269  /*
    10270  Under certain condition, this whole section can be skipped for optimization, so
    10271  we move on directly to trying to allocate with canMakeOtherLost. That's the case
    10272  e.g. for custom pools with linear algorithm.
    10273  */
    10274  if(!canMakeOtherLost || canCreateNewBlock)
    10275  {
    10276  // 1. Search existing allocations. Try to allocate without making other allocations lost.
    10277  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
    10279 
    10280  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    10281  {
    10282  // Use only last block.
    10283  if(!m_Blocks.empty())
    10284  {
    10285  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
    10286  VMA_ASSERT(pCurrBlock);
    10287  VkResult res = AllocateFromBlock(
    10288  pCurrBlock,
    10289  hCurrentPool,
    10290  currentFrameIndex,
    10291  size,
    10292  alignment,
    10293  allocFlagsCopy,
    10294  createInfo.pUserData,
    10295  suballocType,
    10296  strategy,
    10297  pAllocation);
    10298  if(res == VK_SUCCESS)
    10299  {
    10300  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
    10301  return VK_SUCCESS;
    10302  }
    10303  }
    10304  }
    10305  else
    10306  {
    10308  {
    10309  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    10310  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    10311  {
    10312  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10313  VMA_ASSERT(pCurrBlock);
    10314  VkResult res = AllocateFromBlock(
    10315  pCurrBlock,
    10316  hCurrentPool,
    10317  currentFrameIndex,
    10318  size,
    10319  alignment,
    10320  allocFlagsCopy,
    10321  createInfo.pUserData,
    10322  suballocType,
    10323  strategy,
    10324  pAllocation);
    10325  if(res == VK_SUCCESS)
    10326  {
    10327  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
    10328  return VK_SUCCESS;
    10329  }
    10330  }
    10331  }
    10332  else // WORST_FIT, FIRST_FIT
    10333  {
    10334  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
    10335  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    10336  {
    10337  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10338  VMA_ASSERT(pCurrBlock);
    10339  VkResult res = AllocateFromBlock(
    10340  pCurrBlock,
    10341  hCurrentPool,
    10342  currentFrameIndex,
    10343  size,
    10344  alignment,
    10345  allocFlagsCopy,
    10346  createInfo.pUserData,
    10347  suballocType,
    10348  strategy,
    10349  pAllocation);
    10350  if(res == VK_SUCCESS)
    10351  {
    10352  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
    10353  return VK_SUCCESS;
    10354  }
    10355  }
    10356  }
    10357  }
    10358 
    10359  // 2. Try to create new block.
    10360  if(canCreateNewBlock)
    10361  {
    10362  // Calculate optimal size for new block.
    10363  VkDeviceSize newBlockSize = m_PreferredBlockSize;
    10364  uint32_t newBlockSizeShift = 0;
    10365  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
    10366 
    10367  if(!m_ExplicitBlockSize)
    10368  {
    10369  // Allocate 1/8, 1/4, 1/2 as first blocks.
    10370  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
    10371  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
    10372  {
    10373  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    10374  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
    10375  {
    10376  newBlockSize = smallerNewBlockSize;
    10377  ++newBlockSizeShift;
    10378  }
    10379  else
    10380  {
    10381  break;
    10382  }
    10383  }
    10384  }
    10385 
    10386  size_t newBlockIndex = 0;
    10387  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
    10388  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
    10389  if(!m_ExplicitBlockSize)
    10390  {
    10391  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
    10392  {
    10393  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    10394  if(smallerNewBlockSize >= size)
    10395  {
    10396  newBlockSize = smallerNewBlockSize;
    10397  ++newBlockSizeShift;
    10398  res = CreateBlock(newBlockSize, &newBlockIndex);
    10399  }
    10400  else
    10401  {
    10402  break;
    10403  }
    10404  }
    10405  }
    10406 
    10407  if(res == VK_SUCCESS)
    10408  {
    10409  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
    10410  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
    10411 
    10412  res = AllocateFromBlock(
    10413  pBlock,
    10414  hCurrentPool,
    10415  currentFrameIndex,
    10416  size,
    10417  alignment,
    10418  allocFlagsCopy,
    10419  createInfo.pUserData,
    10420  suballocType,
    10421  strategy,
    10422  pAllocation);
    10423  if(res == VK_SUCCESS)
    10424  {
    10425  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
    10426  return VK_SUCCESS;
    10427  }
    10428  else
    10429  {
    10430  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
    10431  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    10432  }
    10433  }
    10434  }
    10435  }
    10436 
    10437  // 3. Try to allocate from existing blocks with making other allocations lost.
    10438  if(canMakeOtherLost)
    10439  {
    10440  uint32_t tryIndex = 0;
    10441  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
    10442  {
    10443  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
    10444  VmaAllocationRequest bestRequest = {};
    10445  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
    10446 
    10447  // 1. Search existing allocations.
    10449  {
    10450  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    10451  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    10452  {
    10453  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10454  VMA_ASSERT(pCurrBlock);
    10455  VmaAllocationRequest currRequest = {};
    10456  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
    10457  currentFrameIndex,
    10458  m_FrameInUseCount,
    10459  m_BufferImageGranularity,
    10460  size,
    10461  alignment,
    10462  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
    10463  suballocType,
    10464  canMakeOtherLost,
    10465  strategy,
    10466  &currRequest))
    10467  {
    10468  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    10469  if(pBestRequestBlock == VMA_NULL ||
    10470  currRequestCost < bestRequestCost)
    10471  {
    10472  pBestRequestBlock = pCurrBlock;
    10473  bestRequest = currRequest;
    10474  bestRequestCost = currRequestCost;
    10475 
    10476  if(bestRequestCost == 0)
    10477  {
    10478  break;
    10479  }
    10480  }
    10481  }
    10482  }
    10483  }
    10484  else // WORST_FIT, FIRST_FIT
    10485  {
    10486  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
    10487  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    10488  {
    10489  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10490  VMA_ASSERT(pCurrBlock);
    10491  VmaAllocationRequest currRequest = {};
    10492  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
    10493  currentFrameIndex,
    10494  m_FrameInUseCount,
    10495  m_BufferImageGranularity,
    10496  size,
    10497  alignment,
    10498  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
    10499  suballocType,
    10500  canMakeOtherLost,
    10501  strategy,
    10502  &currRequest))
    10503  {
    10504  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    10505  if(pBestRequestBlock == VMA_NULL ||
    10506  currRequestCost < bestRequestCost ||
    10508  {
    10509  pBestRequestBlock = pCurrBlock;
    10510  bestRequest = currRequest;
    10511  bestRequestCost = currRequestCost;
    10512 
    10513  if(bestRequestCost == 0 ||
    10515  {
    10516  break;
    10517  }
    10518  }
    10519  }
    10520  }
    10521  }
    10522 
    10523  if(pBestRequestBlock != VMA_NULL)
    10524  {
    10525  if(mapped)
    10526  {
    10527  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
    10528  if(res != VK_SUCCESS)
    10529  {
    10530  return res;
    10531  }
    10532  }
    10533 
    10534  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
    10535  currentFrameIndex,
    10536  m_FrameInUseCount,
    10537  &bestRequest))
    10538  {
    10539  // We no longer have an empty Allocation.
    10540  if(pBestRequestBlock->m_pMetadata->IsEmpty())
    10541  {
    10542  m_HasEmptyBlock = false;
    10543  }
    10544  // Allocate from this pBlock.
    10545  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
    10546  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, isUpperAddress, *pAllocation);
    10547  (*pAllocation)->InitBlockAllocation(
    10548  hCurrentPool,
    10549  pBestRequestBlock,
    10550  bestRequest.offset,
    10551  alignment,
    10552  size,
    10553  suballocType,
    10554  mapped,
    10555  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    10556  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
    10557  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
    10558  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
    10559  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    10560  {
    10561  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    10562  }
    10563  if(IsCorruptionDetectionEnabled())
    10564  {
    10565  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
    10566  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
    10567  }
    10568  return VK_SUCCESS;
    10569  }
    10570  // else: Some allocations must have been touched while we are here. Next try.
    10571  }
    10572  else
    10573  {
    10574  // Could not find place in any of the blocks - break outer loop.
    10575  break;
    10576  }
    10577  }
    10578  /* Maximum number of tries exceeded - a very unlike event when many other
    10579  threads are simultaneously touching allocations making it impossible to make
    10580  lost at the same time as we try to allocate. */
    10581  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
    10582  {
    10583  return VK_ERROR_TOO_MANY_OBJECTS;
    10584  }
    10585  }
    10586 
    10587  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    10588 }
    10589 
    10590 void VmaBlockVector::Free(
    10591  VmaAllocation hAllocation)
    10592 {
    10593  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
    10594 
    10595  // Scope for lock.
    10596  {
    10597  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10598 
    10599  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    10600 
    10601  if(IsCorruptionDetectionEnabled())
    10602  {
    10603  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
    10604  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
    10605  }
    10606 
    10607  if(hAllocation->IsPersistentMap())
    10608  {
    10609  pBlock->Unmap(m_hAllocator, 1);
    10610  }
    10611 
    10612  pBlock->m_pMetadata->Free(hAllocation);
    10613  VMA_HEAVY_ASSERT(pBlock->Validate());
    10614 
    10615  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", memTypeIndex);
    10616 
    10617  // pBlock became empty after this deallocation.
    10618  if(pBlock->m_pMetadata->IsEmpty())
    10619  {
    10620  // Already has empty Allocation. We don't want to have two, so delete this one.
    10621  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
    10622  {
    10623  pBlockToDelete = pBlock;
    10624  Remove(pBlock);
    10625  }
    10626  // We now have first empty block.
    10627  else
    10628  {
    10629  m_HasEmptyBlock = true;
    10630  }
    10631  }
    10632  // pBlock didn't become empty, but we have another empty block - find and free that one.
    10633  // (This is optional, heuristics.)
    10634  else if(m_HasEmptyBlock)
    10635  {
    10636  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
    10637  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
    10638  {
    10639  pBlockToDelete = pLastBlock;
    10640  m_Blocks.pop_back();
    10641  m_HasEmptyBlock = false;
    10642  }
    10643  }
    10644 
    10645  IncrementallySortBlocks();
    10646  }
    10647 
    10648  // Destruction of a free Allocation. Deferred until this point, outside of mutex
    10649  // lock, for performance reason.
    10650  if(pBlockToDelete != VMA_NULL)
    10651  {
    10652  VMA_DEBUG_LOG(" Deleted empty allocation");
    10653  pBlockToDelete->Destroy(m_hAllocator);
    10654  vma_delete(m_hAllocator, pBlockToDelete);
    10655  }
    10656 }
    10657 
    10658 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
    10659 {
    10660  VkDeviceSize result = 0;
    10661  for(size_t i = m_Blocks.size(); i--; )
    10662  {
    10663  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
    10664  if(result >= m_PreferredBlockSize)
    10665  {
    10666  break;
    10667  }
    10668  }
    10669  return result;
    10670 }
    10671 
    10672 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
    10673 {
    10674  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    10675  {
    10676  if(m_Blocks[blockIndex] == pBlock)
    10677  {
    10678  VmaVectorRemove(m_Blocks, blockIndex);
    10679  return;
    10680  }
    10681  }
    10682  VMA_ASSERT(0);
    10683 }
    10684 
    10685 void VmaBlockVector::IncrementallySortBlocks()
    10686 {
    10687  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    10688  {
    10689  // Bubble sort only until first swap.
    10690  for(size_t i = 1; i < m_Blocks.size(); ++i)
    10691  {
    10692  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
    10693  {
    10694  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
    10695  return;
    10696  }
    10697  }
    10698  }
    10699 }
    10700 
// Tries to carve an allocation of the given size/alignment out of one
// existing block. Does not create new blocks and does not make other
// allocations lost (the caller handles those paths). Returns
// VK_ERROR_OUT_OF_DEVICE_MEMORY if the block has no suitable free region.
VkResult VmaBlockVector::AllocateFromBlock(
    VmaDeviceMemoryBlock* pBlock,
    VmaPool hCurrentPool,
    uint32_t currentFrameIndex,
    VkDeviceSize size,
    VkDeviceSize alignment,
    VmaAllocationCreateFlags allocFlags,
    void* pUserData,
    VmaSuballocationType suballocType,
    uint32_t strategy,
    VmaAllocation* pAllocation)
{
    // The make-other-lost path is explicitly not supported here.
    VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
    const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;

    VmaAllocationRequest currRequest = {};
    if(pBlock->m_pMetadata->CreateAllocationRequest(
        currentFrameIndex,
        m_FrameInUseCount,
        m_BufferImageGranularity,
        size,
        alignment,
        isUpperAddress,
        suballocType,
        false, // canMakeOtherLost
        strategy,
        &currRequest))
    {
        // Allocate from pCurrBlock.
        VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);

        // Map first: a mapping failure here leaves the block metadata untouched.
        if(mapped)
        {
            VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
            if(res != VK_SUCCESS)
            {
                return res;
            }
        }

        // We no longer have an empty Allocation.
        if(pBlock->m_pMetadata->IsEmpty())
        {
            m_HasEmptyBlock = false;
        }

        *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
        pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, isUpperAddress, *pAllocation);
        (*pAllocation)->InitBlockAllocation(
            hCurrentPool,
            pBlock,
            currRequest.offset,
            alignment,
            size,
            suballocType,
            mapped,
            (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
        VMA_HEAVY_ASSERT(pBlock->Validate());
        (*pAllocation)->SetUserData(m_hAllocator, pUserData);
        // Optionally fill new memory with a debug pattern to expose reads of
        // uninitialized data.
        if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
        {
            m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
        }
        // With corruption detection on, write guard values into the margins.
        if(IsCorruptionDetectionEnabled())
        {
            VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
            VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
        }
        return VK_SUCCESS;
    }
    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}
    10775 
    10776 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
    10777 {
    10778  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    10779  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
    10780  allocInfo.allocationSize = blockSize;
    10781  VkDeviceMemory mem = VK_NULL_HANDLE;
    10782  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
    10783  if(res < 0)
    10784  {
    10785  return res;
    10786  }
    10787 
    10788  // New VkDeviceMemory successfully created.
    10789 
    10790  // Create new Allocation for it.
    10791  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
    10792  pBlock->Init(
    10793  m_hAllocator,
    10794  m_MemoryTypeIndex,
    10795  mem,
    10796  allocInfo.allocationSize,
    10797  m_NextBlockId++,
    10798  m_Algorithm);
    10799 
    10800  m_Blocks.push_back(pBlock);
    10801  if(pNewBlockIndex != VMA_NULL)
    10802  {
    10803  *pNewBlockIndex = m_Blocks.size() - 1;
    10804  }
    10805 
    10806  return VK_SUCCESS;
    10807 }
    10808 
    10809 #if VMA_STATS_STRING_ENABLED
    10810 
// Serializes this block vector's state as JSON, for allocator statistics
// dumps. Custom pools emit their full configuration; the default block
// vector of a memory type only emits its preferred block size.
void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
{
    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    json.BeginObject();

    if(m_IsCustomPool)
    {
        json.WriteString("MemoryTypeIndex");
        json.WriteNumber(m_MemoryTypeIndex);

        json.WriteString("BlockSize");
        json.WriteNumber(m_PreferredBlockSize);

        // Min/Max are only written when they actually constrain the pool.
        json.WriteString("BlockCount");
        json.BeginObject(true);
        if(m_MinBlockCount > 0)
        {
            json.WriteString("Min");
            json.WriteNumber((uint64_t)m_MinBlockCount);
        }
        if(m_MaxBlockCount < SIZE_MAX)
        {
            json.WriteString("Max");
            json.WriteNumber((uint64_t)m_MaxBlockCount);
        }
        json.WriteString("Cur");
        json.WriteNumber((uint64_t)m_Blocks.size());
        json.EndObject();

        if(m_FrameInUseCount > 0)
        {
            json.WriteString("FrameInUseCount");
            json.WriteNumber(m_FrameInUseCount);
        }

        if(m_Algorithm != 0)
        {
            json.WriteString("Algorithm");
            json.WriteString(VmaAlgorithmToStr(m_Algorithm));
        }
    }
    else
    {
        json.WriteString("PreferredBlockSize");
        json.WriteNumber(m_PreferredBlockSize);
    }

    // One entry per block, keyed by the block's numeric id.
    json.WriteString("Blocks");
    json.BeginObject();
    for(size_t i = 0; i < m_Blocks.size(); ++i)
    {
        json.BeginString();
        json.ContinueString(m_Blocks[i]->GetId());
        json.EndString();

        m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
    }
    json.EndObject();

    json.EndObject();
}
    10873 
    10874 #endif // #if VMA_STATS_STRING_ENABLED
    10875 
    10876 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
    10877  VmaAllocator hAllocator,
    10878  uint32_t currentFrameIndex)
    10879 {
    10880  if(m_pDefragmentator == VMA_NULL)
    10881  {
    10882  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
    10883  hAllocator,
    10884  this,
    10885  currentFrameIndex);
    10886  }
    10887 
    10888  return m_pDefragmentator;
    10889 }
    10890 
// Runs defragmentation of this block vector using the previously created
// defragmentator (no-op if none exists).
// maxBytesToMove / maxAllocationsToMove are in-out budgets: on return they
// are reduced by the amount actually consumed. Blocks that became empty are
// freed (down to m_MinBlockCount), credited to pDefragmentationStats when
// that pointer is non-null.
VkResult VmaBlockVector::Defragment(
    VmaDefragmentationStats* pDefragmentationStats,
    VkDeviceSize& maxBytesToMove,
    uint32_t& maxAllocationsToMove)
{
    if(m_pDefragmentator == VMA_NULL)
    {
        return VK_SUCCESS;
    }

    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    // Defragment.
    VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);

    // Accumulate statistics.
    if(pDefragmentationStats != VMA_NULL)
    {
        const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
        const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
        pDefragmentationStats->bytesMoved += bytesMoved;
        pDefragmentationStats->allocationsMoved += allocationsMoved;
        // The defragmentator must honor the budgets it was given.
        VMA_ASSERT(bytesMoved <= maxBytesToMove);
        VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
        maxBytesToMove -= bytesMoved;
        maxAllocationsToMove -= allocationsMoved;
    }

    // Free empty blocks.
    // Iterate backwards so removal by index stays valid; when the vector must
    // keep m_MinBlockCount blocks, surviving empty blocks set m_HasEmptyBlock.
    m_HasEmptyBlock = false;
    for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    {
        VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
        if(pBlock->m_pMetadata->IsEmpty())
        {
            if(m_Blocks.size() > m_MinBlockCount)
            {
                if(pDefragmentationStats != VMA_NULL)
                {
                    ++pDefragmentationStats->deviceMemoryBlocksFreed;
                    pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
                }

                VmaVectorRemove(m_Blocks, blockIndex);
                pBlock->Destroy(m_hAllocator);
                vma_delete(m_hAllocator, pBlock);
            }
            else
            {
                m_HasEmptyBlock = true;
            }
        }
    }

    return result;
}
    10947 
    10948 void VmaBlockVector::DestroyDefragmentator()
    10949 {
    10950  if(m_pDefragmentator != VMA_NULL)
    10951  {
    10952  vma_delete(m_hAllocator, m_pDefragmentator);
    10953  m_pDefragmentator = VMA_NULL;
    10954  }
    10955 }
    10956 
    10957 void VmaBlockVector::MakePoolAllocationsLost(
    10958  uint32_t currentFrameIndex,
    10959  size_t* pLostAllocationCount)
    10960 {
    10961  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10962  size_t lostAllocationCount = 0;
    10963  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    10964  {
    10965  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    10966  VMA_ASSERT(pBlock);
    10967  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
    10968  }
    10969  if(pLostAllocationCount != VMA_NULL)
    10970  {
    10971  *pLostAllocationCount = lostAllocationCount;
    10972  }
    10973 }
    10974 
    10975 VkResult VmaBlockVector::CheckCorruption()
    10976 {
    10977  if(!IsCorruptionDetectionEnabled())
    10978  {
    10979  return VK_ERROR_FEATURE_NOT_PRESENT;
    10980  }
    10981 
    10982  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10983  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    10984  {
    10985  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    10986  VMA_ASSERT(pBlock);
    10987  VkResult res = pBlock->CheckCorruption(m_hAllocator);
    10988  if(res != VK_SUCCESS)
    10989  {
    10990  return res;
    10991  }
    10992  }
    10993  return VK_SUCCESS;
    10994 }
    10995 
    10996 void VmaBlockVector::AddStats(VmaStats* pStats)
    10997 {
    10998  const uint32_t memTypeIndex = m_MemoryTypeIndex;
    10999  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
    11000 
    11001  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    11002 
    11003  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    11004  {
    11005  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    11006  VMA_ASSERT(pBlock);
    11007  VMA_HEAVY_ASSERT(pBlock->Validate());
    11008  VmaStatInfo allocationStatInfo;
    11009  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
    11010  VmaAddStatInfo(pStats->total, allocationStatInfo);
    11011  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    11012  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    11013  }
    11014 }
    11015 
    11017 // VmaDefragmentator members definition
    11018 
// Binds a defragmentator to one block vector. Construction does no work:
// allocations are registered later via AddAllocation() and moved by
// Defragment(). Move counters start at zero; both internal vectors use the
// allocator's host allocation callbacks.
VmaDefragmentator::VmaDefragmentator(
    VmaAllocator hAllocator,
    VmaBlockVector* pBlockVector,
    uint32_t currentFrameIndex) :
    m_hAllocator(hAllocator),
    m_pBlockVector(pBlockVector),
    m_CurrentFrameIndex(currentFrameIndex),
    m_BytesMoved(0),
    m_AllocationsMoved(0),
    m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
    m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
{
    // Only the default algorithm (0) supports defragmentation.
    VMA_ASSERT(pBlockVector->GetAlgorithm() == 0);
}
    11033 
    11034 VmaDefragmentator::~VmaDefragmentator()
    11035 {
    11036  for(size_t i = m_Blocks.size(); i--; )
    11037  {
    11038  vma_delete(m_hAllocator, m_Blocks[i]);
    11039  }
    11040 }
    11041 
    11042 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
    11043 {
    11044  AllocationInfo allocInfo;
    11045  allocInfo.m_hAllocation = hAlloc;
    11046  allocInfo.m_pChanged = pChanged;
    11047  m_Allocations.push_back(allocInfo);
    11048 }
    11049 
    11050 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
    11051 {
    11052  // It has already been mapped for defragmentation.
    11053  if(m_pMappedDataForDefragmentation)
    11054  {
    11055  *ppMappedData = m_pMappedDataForDefragmentation;
    11056  return VK_SUCCESS;
    11057  }
    11058 
    11059  // It is originally mapped.
    11060  if(m_pBlock->GetMappedData())
    11061  {
    11062  *ppMappedData = m_pBlock->GetMappedData();
    11063  return VK_SUCCESS;
    11064  }
    11065 
    11066  // Map on first usage.
    11067  VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
    11068  *ppMappedData = m_pMappedDataForDefragmentation;
    11069  return res;
    11070 }
    11071 
    11072 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
    11073 {
    11074  if(m_pMappedDataForDefragmentation != VMA_NULL)
    11075  {
    11076  m_pBlock->Unmap(hAllocator, 1);
    11077  }
    11078 }
    11079 
    11080 VkResult VmaDefragmentator::DefragmentRound(
    11081  VkDeviceSize maxBytesToMove,
    11082  uint32_t maxAllocationsToMove)
    11083 {
    11084  if(m_Blocks.empty())
    11085  {
    11086  return VK_SUCCESS;
    11087  }
    11088 
    11089  size_t srcBlockIndex = m_Blocks.size() - 1;
    11090  size_t srcAllocIndex = SIZE_MAX;
    11091  for(;;)
    11092  {
    11093  // 1. Find next allocation to move.
    11094  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
    11095  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
    11096  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
    11097  {
    11098  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
    11099  {
    11100  // Finished: no more allocations to process.
    11101  if(srcBlockIndex == 0)
    11102  {
    11103  return VK_SUCCESS;
    11104  }
    11105  else
    11106  {
    11107  --srcBlockIndex;
    11108  srcAllocIndex = SIZE_MAX;
    11109  }
    11110  }
    11111  else
    11112  {
    11113  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
    11114  }
    11115  }
    11116 
    11117  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
    11118  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
    11119 
    11120  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
    11121  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
    11122  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
    11123  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
    11124 
    11125  // 2. Try to find new place for this allocation in preceding or current block.
    11126  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
    11127  {
    11128  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
    11129  VmaAllocationRequest dstAllocRequest;
    11130  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
    11131  m_CurrentFrameIndex,
    11132  m_pBlockVector->GetFrameInUseCount(),
    11133  m_pBlockVector->GetBufferImageGranularity(),
    11134  size,
    11135  alignment,
    11136  false, // upperAddress
    11137  suballocType,
    11138  false, // canMakeOtherLost
    11140  &dstAllocRequest) &&
    11141  MoveMakesSense(
    11142  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
    11143  {
    11144  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
    11145 
    11146  // Reached limit on number of allocations or bytes to move.
    11147  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
    11148  (m_BytesMoved + size > maxBytesToMove))
    11149  {
    11150  return VK_INCOMPLETE;
    11151  }
    11152 
    11153  void* pDstMappedData = VMA_NULL;
    11154  VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
    11155  if(res != VK_SUCCESS)
    11156  {
    11157  return res;
    11158  }
    11159 
    11160  void* pSrcMappedData = VMA_NULL;
    11161  res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
    11162  if(res != VK_SUCCESS)
    11163  {
    11164  return res;
    11165  }
    11166 
    11167  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
    11168  memcpy(
    11169  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
    11170  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
    11171  static_cast<size_t>(size));
    11172 
    11173  if(VMA_DEBUG_MARGIN > 0)
    11174  {
    11175  VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset - VMA_DEBUG_MARGIN);
    11176  VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset + size);
    11177  }
    11178 
    11179  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
    11180  dstAllocRequest,
    11181  suballocType,
    11182  size,
    11183  false, // upperAddress
    11184  allocInfo.m_hAllocation);
    11185  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
    11186 
    11187  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
    11188 
    11189  if(allocInfo.m_pChanged != VMA_NULL)
    11190  {
    11191  *allocInfo.m_pChanged = VK_TRUE;
    11192  }
    11193 
    11194  ++m_AllocationsMoved;
    11195  m_BytesMoved += size;
    11196 
    11197  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
    11198 
    11199  break;
    11200  }
    11201  }
    11202 
    11203  // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.
    11204 
    11205  if(srcAllocIndex > 0)
    11206  {
    11207  --srcAllocIndex;
    11208  }
    11209  else
    11210  {
    11211  if(srcBlockIndex > 0)
    11212  {
    11213  --srcBlockIndex;
    11214  srcAllocIndex = SIZE_MAX;
    11215  }
    11216  else
    11217  {
    11218  return VK_SUCCESS;
    11219  }
    11220  }
    11221  }
    11222 }
    11223 
// Top-level defragmentation entry point. Phases:
//   1. Build one BlockInfo per block of the bound block vector.
//   2. Distribute the registered allocations into their owning BlockInfo,
//      skipping allocations that became lost in the meantime.
//   3. Sort blocks from most "destination"-like to most "source"-like and
//      run up to 2 rounds of DefragmentRound within the given budgets.
//   4. Unmap any block memory that was mapped just for defragmentation.
VkResult VmaDefragmentator::Defragment(
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    if(m_Allocations.empty())
    {
        return VK_SUCCESS;
    }

    // Create block info for each block.
    const size_t blockCount = m_pBlockVector->m_Blocks.size();
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
        pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
        m_Blocks.push_back(pBlockInfo);
    }

    // Sort them by m_pBlock pointer value.
    // Enables the binary search by block pointer below.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());

    // Move allocation infos from m_Allocations to appropriate m_Blocks[memTypeIndex].m_Allocations.
    for(size_t blockIndex = 0, allocCount = m_Allocations.size(); blockIndex < allocCount; ++blockIndex)
    {
        AllocationInfo& allocInfo = m_Allocations[blockIndex];
        // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
        if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
        {
            VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
            BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
            if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
            {
                (*it)->m_Allocations.push_back(allocInfo);
            }
            else
            {
                // Every registered allocation must belong to a block of this vector.
                VMA_ASSERT(0);
            }
        }
    }
    m_Allocations.clear();

    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = m_Blocks[blockIndex];
        pBlockInfo->CalcHasNonMovableAllocations();
        pBlockInfo->SortAllocationsBySizeDescecnding();
    }

    // Sort m_Blocks this time by the main criterium, from most "destination" to most "source" blocks.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());

    // Execute defragmentation rounds (the main part).
    VkResult result = VK_SUCCESS;
    for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
    {
        result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
    }

    // Unmap blocks that were mapped for defragmentation.
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        m_Blocks[blockIndex]->Unmap(m_hAllocator);
    }

    return result;
}
    11291 
    11292 bool VmaDefragmentator::MoveMakesSense(
    11293  size_t dstBlockIndex, VkDeviceSize dstOffset,
    11294  size_t srcBlockIndex, VkDeviceSize srcOffset)
    11295 {
    11296  if(dstBlockIndex < srcBlockIndex)
    11297  {
    11298  return true;
    11299  }
    11300  if(dstBlockIndex > srcBlockIndex)
    11301  {
    11302  return false;
    11303  }
    11304  if(dstOffset < srcOffset)
    11305  {
    11306  return true;
    11307  }
    11308  return false;
    11309 }
    11310 
    11312 // VmaRecorder
    11313 
    11314 #if VMA_RECORDING_ENABLED
    11315 
// Constructs an inactive recorder. Real setup (opening the output file,
// reading the performance-counter frequency) happens in Init().
VmaRecorder::VmaRecorder() :
    m_UseMutex(true),
    m_Flags(0),
    m_File(VMA_NULL),
    m_Freq(INT64_MAX),
    m_StartCounter(INT64_MAX)
{
}
    11324 
// Opens the recording output file and writes the format header.
// Returns VK_ERROR_INITIALIZATION_FAILED if the file cannot be opened.
VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
{
    m_UseMutex = useMutex;
    m_Flags = settings.flags;

    // Timestamps are derived from QueryPerformanceCounter (Windows API):
    // m_Freq is ticks per second, m_StartCounter the moment recording began.
    QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
    QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);

    // Open file for writing.
    errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
    if(err != 0)
    {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Write header.
    // First line identifies the file format, second line is its version.
    fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
    fprintf(m_File, "%s\n", "1,3");

    return VK_SUCCESS;
}
    11346 
    11347 VmaRecorder::~VmaRecorder()
    11348 {
    11349  if(m_File != VMA_NULL)
    11350  {
    11351  fclose(m_File);
    11352  }
    11353 }
    11354 
    11355 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
    11356 {
    11357  CallParams callParams;
    11358  GetBasicParams(callParams);
    11359 
    11360  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11361  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
    11362  Flush();
    11363 }
    11364 
    11365 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
    11366 {
    11367  CallParams callParams;
    11368  GetBasicParams(callParams);
    11369 
    11370  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11371  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
    11372  Flush();
    11373 }
    11374 
// Appends one CSV line recording a vmaCreatePool() call.
// Column order is the recording file format — presumably consumed by an
// external replay tool, so keep it stable (TODO confirm against the parser).
void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
        createInfo.memoryTypeIndex,
        createInfo.flags,
        createInfo.blockSize,
        (uint64_t)createInfo.minBlockCount,
        (uint64_t)createInfo.maxBlockCount,
        createInfo.frameInUseCount,
        pool);
    Flush();
}
    11391 
    11392 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
    11393 {
    11394  CallParams callParams;
    11395  GetBasicParams(callParams);
    11396 
    11397  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11398  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
    11399  pool);
    11400  Flush();
    11401 }
    11402 
// Appends one CSV line recording a vmaAllocateMemory() call: the memory
// requirements, the creation parameters, and the resulting handle. The
// positional column order is the file format — keep it stable.
void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    // userDataStr renders pUserData either as a string or a pointer,
    // depending on the USER_DATA_COPY_STRING flag.
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11427 
// Appends one CSV line recording a vmaAllocateMemoryForBuffer() call.
// Same columns as vmaAllocateMemory plus the two dedicated-allocation flags
// (encoded as 0/1). Positional column order is the file format.
void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    bool requiresDedicatedAllocation,
    bool prefersDedicatedAllocation,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        requiresDedicatedAllocation ? 1 : 0,
        prefersDedicatedAllocation ? 1 : 0,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11456 
// Appends one CSV line recording a vmaAllocateMemoryForImage() call.
// Identical column layout to RecordAllocateMemoryForBuffer, only the
// function name column differs. Positional column order is the file format.
void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    bool requiresDedicatedAllocation,
    bool prefersDedicatedAllocation,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        requiresDedicatedAllocation ? 1 : 0,
        prefersDedicatedAllocation ? 1 : 0,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11485 
    11486 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
    11487  VmaAllocation allocation)
    11488 {
    11489  CallParams callParams;
    11490  GetBasicParams(callParams);
    11491 
    11492  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11493  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    11494  allocation);
    11495  Flush();
    11496 }
    11497 
// Appends one CSV line recording a vmaSetAllocationUserData() call.
void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
    VmaAllocation allocation,
    const void* pUserData)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    // Render pUserData as a string when the allocation stores user data as a
    // copied string, otherwise as a raw pointer.
    UserDataString userDataStr(
        allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
        pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11514 
    11515 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
    11516  VmaAllocation allocation)
    11517 {
    11518  CallParams callParams;
    11519  GetBasicParams(callParams);
    11520 
    11521  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11522  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
    11523  allocation);
    11524  Flush();
    11525 }
    11526 
    11527 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
    11528  VmaAllocation allocation)
    11529 {
    11530  CallParams callParams;
    11531  GetBasicParams(callParams);
    11532 
    11533  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11534  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    11535  allocation);
    11536  Flush();
    11537 }
    11538 
    11539 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
    11540  VmaAllocation allocation)
    11541 {
    11542  CallParams callParams;
    11543  GetBasicParams(callParams);
    11544 
    11545  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11546  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    11547  allocation);
    11548  Flush();
    11549 }
    11550 
    11551 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
    11552  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    11553 {
    11554  CallParams callParams;
    11555  GetBasicParams(callParams);
    11556 
    11557  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11558  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
    11559  allocation,
    11560  offset,
    11561  size);
    11562  Flush();
    11563 }
    11564 
    11565 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
    11566  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    11567 {
    11568  CallParams callParams;
    11569  GetBasicParams(callParams);
    11570 
    11571  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11572  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
    11573  allocation,
    11574  offset,
    11575  size);
    11576  Flush();
    11577 }
    11578 
    11579 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
    11580  const VkBufferCreateInfo& bufCreateInfo,
    11581  const VmaAllocationCreateInfo& allocCreateInfo,
    11582  VmaAllocation allocation)
    11583 {
    11584  CallParams callParams;
    11585  GetBasicParams(callParams);
    11586 
    11587  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11588  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    11589  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    11590  bufCreateInfo.flags,
    11591  bufCreateInfo.size,
    11592  bufCreateInfo.usage,
    11593  bufCreateInfo.sharingMode,
    11594  allocCreateInfo.flags,
    11595  allocCreateInfo.usage,
    11596  allocCreateInfo.requiredFlags,
    11597  allocCreateInfo.preferredFlags,
    11598  allocCreateInfo.memoryTypeBits,
    11599  allocCreateInfo.pool,
    11600  allocation,
    11601  userDataStr.GetString());
    11602  Flush();
    11603 }
    11604 
    11605 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
    11606  const VkImageCreateInfo& imageCreateInfo,
    11607  const VmaAllocationCreateInfo& allocCreateInfo,
    11608  VmaAllocation allocation)
    11609 {
    11610  CallParams callParams;
    11611  GetBasicParams(callParams);
    11612 
    11613  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11614  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    11615  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    11616  imageCreateInfo.flags,
    11617  imageCreateInfo.imageType,
    11618  imageCreateInfo.format,
    11619  imageCreateInfo.extent.width,
    11620  imageCreateInfo.extent.height,
    11621  imageCreateInfo.extent.depth,
    11622  imageCreateInfo.mipLevels,
    11623  imageCreateInfo.arrayLayers,
    11624  imageCreateInfo.samples,
    11625  imageCreateInfo.tiling,
    11626  imageCreateInfo.usage,
    11627  imageCreateInfo.sharingMode,
    11628  imageCreateInfo.initialLayout,
    11629  allocCreateInfo.flags,
    11630  allocCreateInfo.usage,
    11631  allocCreateInfo.requiredFlags,
    11632  allocCreateInfo.preferredFlags,
    11633  allocCreateInfo.memoryTypeBits,
    11634  allocCreateInfo.pool,
    11635  allocation,
    11636  userDataStr.GetString());
    11637  Flush();
    11638 }
    11639 
    11640 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
    11641  VmaAllocation allocation)
    11642 {
    11643  CallParams callParams;
    11644  GetBasicParams(callParams);
    11645 
    11646  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11647  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
    11648  allocation);
    11649  Flush();
    11650 }
    11651 
    11652 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
    11653  VmaAllocation allocation)
    11654 {
    11655  CallParams callParams;
    11656  GetBasicParams(callParams);
    11657 
    11658  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11659  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
    11660  allocation);
    11661  Flush();
    11662 }
    11663 
    11664 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
    11665  VmaAllocation allocation)
    11666 {
    11667  CallParams callParams;
    11668  GetBasicParams(callParams);
    11669 
    11670  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11671  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
    11672  allocation);
    11673  Flush();
    11674 }
    11675 
    11676 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
    11677  VmaAllocation allocation)
    11678 {
    11679  CallParams callParams;
    11680  GetBasicParams(callParams);
    11681 
    11682  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11683  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
    11684  allocation);
    11685  Flush();
    11686 }
    11687 
    11688 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
    11689  VmaPool pool)
    11690 {
    11691  CallParams callParams;
    11692  GetBasicParams(callParams);
    11693 
    11694  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11695  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
    11696  pool);
    11697  Flush();
    11698 }
    11699 
    11700 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
    11701 {
    11702  if(pUserData != VMA_NULL)
    11703  {
    11704  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
    11705  {
    11706  m_Str = (const char*)pUserData;
    11707  }
    11708  else
    11709  {
    11710  sprintf_s(m_PtrStr, "%p", pUserData);
    11711  m_Str = m_PtrStr;
    11712  }
    11713  }
    11714  else
    11715  {
    11716  m_Str = "";
    11717  }
    11718 }
    11719 
// Writes the "Config,Begin" .. "Config,End" header section of the recording file.
// It captures everything a replay tool needs to reproduce the environment:
// physical device identity and limits, the full memory heap/type layout,
// enabled extensions, and the compile-time VMA_* debug macros.
// NOTE: the exact text of every line is part of the recording file format —
// do not reword or reorder these fprintf calls.
void VmaRecorder::WriteConfiguration(
    const VkPhysicalDeviceProperties& devProps,
    const VkPhysicalDeviceMemoryProperties& memProps,
    bool dedicatedAllocationExtensionEnabled)
{
    fprintf(m_File, "Config,Begin\n");

    // Physical device identity.
    fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
    fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
    fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
    fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
    fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
    fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);

    // Limits that influence allocator behavior.
    fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
    fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
    fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);

    // Memory heaps: size and flags per heap.
    fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
    for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
    }
    // Memory types: heap index and property flags per type.
    fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
    for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
    }

    fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);

    // Compile-time configuration macros active in this build.
    fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
    fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
    fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
    fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
    fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);

    fprintf(m_File, "Config,End\n");
}
    11765 
    11766 void VmaRecorder::GetBasicParams(CallParams& outParams)
    11767 {
    11768  outParams.threadId = GetCurrentThreadId();
    11769 
    11770  LARGE_INTEGER counter;
    11771  QueryPerformanceCounter(&counter);
    11772  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
    11773 }
    11774 
    11775 void VmaRecorder::Flush()
    11776 {
    11777  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
    11778  {
    11779  fflush(m_File);
    11780  }
    11781 }
    11782 
    11783 #endif // #if VMA_RECORDING_ENABLED
    11784 
    11786 // VmaAllocator_T
    11787 
    11788 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
    11789  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
    11790  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
    11791  m_hDevice(pCreateInfo->device),
    11792  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
    11793  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
    11794  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
    11795  m_PreferredLargeHeapBlockSize(0),
    11796  m_PhysicalDevice(pCreateInfo->physicalDevice),
    11797  m_CurrentFrameIndex(0),
    11798  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
    11799  m_NextPoolId(0)
    11801  ,m_pRecorder(VMA_NULL)
    11802 #endif
    11803 {
    11804  if(VMA_DEBUG_DETECT_CORRUPTION)
    11805  {
    11806  // Needs to be multiply of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
    11807  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
    11808  }
    11809 
    11810  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
    11811 
    11812 #if !(VMA_DEDICATED_ALLOCATION)
    11814  {
    11815  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
    11816  }
    11817 #endif
    11818 
    11819  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
    11820  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
    11821  memset(&m_MemProps, 0, sizeof(m_MemProps));
    11822 
    11823  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
    11824  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
    11825 
    11826  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    11827  {
    11828  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
    11829  }
    11830 
    11831  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
    11832  {
    11833  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
    11834  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
    11835  }
    11836 
    11837  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
    11838 
    11839  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
    11840  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
    11841 
    11842  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
    11843  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
    11844  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
    11845  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
    11846 
    11847  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
    11848  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
    11849 
    11850  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
    11851  {
    11852  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
    11853  {
    11854  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
    11855  if(limit != VK_WHOLE_SIZE)
    11856  {
    11857  m_HeapSizeLimit[heapIndex] = limit;
    11858  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
    11859  {
    11860  m_MemProps.memoryHeaps[heapIndex].size = limit;
    11861  }
    11862  }
    11863  }
    11864  }
    11865 
    11866  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    11867  {
    11868  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
    11869 
    11870  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
    11871  this,
    11872  memTypeIndex,
    11873  preferredBlockSize,
    11874  0,
    11875  SIZE_MAX,
    11876  GetBufferImageGranularity(),
    11877  pCreateInfo->frameInUseCount,
    11878  false, // isCustomPool
    11879  false, // explicitBlockSize
    11880  false); // linearAlgorithm
    11881  // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
    11882  // becase minBlockCount is 0.
    11883  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
    11884 
    11885  }
    11886 }
    11887 
    11888 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
    11889 {
    11890  VkResult res = VK_SUCCESS;
    11891 
    11892  if(pCreateInfo->pRecordSettings != VMA_NULL &&
    11893  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
    11894  {
    11895 #if VMA_RECORDING_ENABLED
    11896  m_pRecorder = vma_new(this, VmaRecorder)();
    11897  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
    11898  if(res != VK_SUCCESS)
    11899  {
    11900  return res;
    11901  }
    11902  m_pRecorder->WriteConfiguration(
    11903  m_PhysicalDeviceProperties,
    11904  m_MemProps,
    11905  m_UseKhrDedicatedAllocation);
    11906  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
    11907 #else
    11908  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
    11909  return VK_ERROR_FEATURE_NOT_PRESENT;
    11910 #endif
    11911  }
    11912 
    11913  return res;
    11914 }
    11915 
    11916 VmaAllocator_T::~VmaAllocator_T()
    11917 {
    11918 #if VMA_RECORDING_ENABLED
    11919  if(m_pRecorder != VMA_NULL)
    11920  {
    11921  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
    11922  vma_delete(this, m_pRecorder);
    11923  }
    11924 #endif
    11925 
    11926  VMA_ASSERT(m_Pools.empty());
    11927 
    11928  for(size_t i = GetMemoryTypeCount(); i--; )
    11929  {
    11930  vma_delete(this, m_pDedicatedAllocations[i]);
    11931  vma_delete(this, m_pBlockVectors[i]);
    11932  }
    11933 }
    11934 
// Populates m_VulkanFunctions, in three stages:
//   1. If VMA_STATIC_VULKAN_FUNCTIONS == 1, take addresses of the statically
//      linked Vulkan entry points (and fetch the KHR dedicated-allocation
//      entry points via vkGetDeviceProcAddr when that extension is enabled).
//   2. Overwrite any of them with non-null pointers the user supplied in
//      pVulkanFunctions (may be VMA_NULL).
//   3. Assert that every required pointer ended up non-null.
void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
{
#if VMA_STATIC_VULKAN_FUNCTIONS == 1
    m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
    m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
    m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
    m_VulkanFunctions.vkMapMemory = &vkMapMemory;
    m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
    m_VulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
    m_VulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
    m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
    m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
    m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
    m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
    m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
    m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
    m_VulkanFunctions.vkCreateImage = &vkCreateImage;
    m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        // Extension entry points are not exported statically; resolve them
        // at runtime from the device.
        m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
            (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
        m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
            (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
    }
#endif // #if VMA_DEDICATED_ALLOCATION
#endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1

// Copy a user-provided pointer only when it is non-null, so partial
// VmaVulkanFunctions structs merge with the statically-resolved defaults.
#define VMA_COPY_IF_NOT_NULL(funcName) \
    if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;

    if(pVulkanFunctions != VMA_NULL)
    {
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
        VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
        VMA_COPY_IF_NOT_NULL(vkFreeMemory);
        VMA_COPY_IF_NOT_NULL(vkMapMemory);
        VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
        VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
        VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
        VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
        VMA_COPY_IF_NOT_NULL(vkCreateImage);
        VMA_COPY_IF_NOT_NULL(vkDestroyImage);
#if VMA_DEDICATED_ALLOCATION
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
#endif
    }

#undef VMA_COPY_IF_NOT_NULL

    // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
    // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
        VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
    }
#endif
}
    12020 
    12021 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
    12022 {
    12023  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    12024  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
    12025  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
    12026  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
    12027 }
    12028 
    12029 VkResult VmaAllocator_T::AllocateMemoryOfType(
    12030  VkDeviceSize size,
    12031  VkDeviceSize alignment,
    12032  bool dedicatedAllocation,
    12033  VkBuffer dedicatedBuffer,
    12034  VkImage dedicatedImage,
    12035  const VmaAllocationCreateInfo& createInfo,
    12036  uint32_t memTypeIndex,
    12037  VmaSuballocationType suballocType,
    12038  VmaAllocation* pAllocation)
    12039 {
    12040  VMA_ASSERT(pAllocation != VMA_NULL);
    12041  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
    12042 
    12043  VmaAllocationCreateInfo finalCreateInfo = createInfo;
    12044 
    12045  // If memory type is not HOST_VISIBLE, disable MAPPED.
    12046  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    12047  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    12048  {
    12049  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
    12050  }
    12051 
    12052  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
    12053  VMA_ASSERT(blockVector);
    12054 
    12055  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
    12056  bool preferDedicatedMemory =
    12057  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
    12058  dedicatedAllocation ||
    12059  // Heuristics: Allocate dedicated memory if requested size if greater than half of preferred block size.
    12060  size > preferredBlockSize / 2;
    12061 
    12062  if(preferDedicatedMemory &&
    12063  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
    12064  finalCreateInfo.pool == VK_NULL_HANDLE)
    12065  {
    12067  }
    12068 
    12069  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
    12070  {
    12071  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12072  {
    12073  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12074  }
    12075  else
    12076  {
    12077  return AllocateDedicatedMemory(
    12078  size,
    12079  suballocType,
    12080  memTypeIndex,
    12081  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    12082  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    12083  finalCreateInfo.pUserData,
    12084  dedicatedBuffer,
    12085  dedicatedImage,
    12086  pAllocation);
    12087  }
    12088  }
    12089  else
    12090  {
    12091  VkResult res = blockVector->Allocate(
    12092  VK_NULL_HANDLE, // hCurrentPool
    12093  m_CurrentFrameIndex.load(),
    12094  size,
    12095  alignment,
    12096  finalCreateInfo,
    12097  suballocType,
    12098  pAllocation);
    12099  if(res == VK_SUCCESS)
    12100  {
    12101  return res;
    12102  }
    12103 
    12104  // 5. Try dedicated memory.
    12105  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12106  {
    12107  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12108  }
    12109  else
    12110  {
    12111  res = AllocateDedicatedMemory(
    12112  size,
    12113  suballocType,
    12114  memTypeIndex,
    12115  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    12116  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    12117  finalCreateInfo.pUserData,
    12118  dedicatedBuffer,
    12119  dedicatedImage,
    12120  pAllocation);
    12121  if(res == VK_SUCCESS)
    12122  {
    12123  // Succeeded: AllocateDedicatedMemory function already filld pMemory, nothing more to do here.
    12124  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
    12125  return VK_SUCCESS;
    12126  }
    12127  else
    12128  {
    12129  // Everything failed: Return error code.
    12130  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    12131  return res;
    12132  }
    12133  }
    12134  }
    12135 }
    12136 
// Allocates a whole, dedicated VkDeviceMemory block for a single allocation.
//   size            - requested allocation size in bytes.
//   suballocType    - buffer/image classification stored in the allocation.
//   memTypeIndex    - Vulkan memory type to allocate from.
//   map             - if true, the memory is persistently mapped before return.
//   isUserDataString- whether pUserData is a string to be copied.
//   dedicatedBuffer / dedicatedImage - at most one may be non-null; used to
//       fill VkMemoryDedicatedAllocateInfoKHR when the KHR extension is on.
//   pAllocation     - out: the created allocation.
// On vkMapMemory failure the freshly allocated memory is freed (rollback).
VkResult VmaAllocator_T::AllocateDedicatedMemory(
    VkDeviceSize size,
    VmaSuballocationType suballocType,
    uint32_t memTypeIndex,
    bool map,
    bool isUserDataString,
    void* pUserData,
    VkBuffer dedicatedBuffer,
    VkImage dedicatedImage,
    VmaAllocation* pAllocation)
{
    VMA_ASSERT(pAllocation);

    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    allocInfo.memoryTypeIndex = memTypeIndex;
    allocInfo.allocationSize = size;

#if VMA_DEDICATED_ALLOCATION
    // Chain dedicated-allocation info so the driver can optimize for the
    // specific buffer/image this memory is bound to.
    VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
    if(m_UseKhrDedicatedAllocation)
    {
        if(dedicatedBuffer != VK_NULL_HANDLE)
        {
            VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
            dedicatedAllocInfo.buffer = dedicatedBuffer;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
        else if(dedicatedImage != VK_NULL_HANDLE)
        {
            dedicatedAllocInfo.image = dedicatedImage;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
    }
#endif // #if VMA_DEDICATED_ALLOCATION

    // Allocate VkDeviceMemory.
    VkDeviceMemory hMemory = VK_NULL_HANDLE;
    VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
    if(res < 0)
    {
        VMA_DEBUG_LOG("    vkAllocateMemory FAILED");
        return res;
    }

    // Persistently map the whole block if requested.
    void* pMappedData = VMA_NULL;
    if(map)
    {
        res = (*m_VulkanFunctions.vkMapMemory)(
            m_hDevice,
            hMemory,
            0,
            VK_WHOLE_SIZE,
            0,
            &pMappedData);
        if(res < 0)
        {
            VMA_DEBUG_LOG("    vkMapMemory FAILED");
            // Roll back the device memory allocated above.
            FreeVulkanMemory(memTypeIndex, size, hMemory);
            return res;
        }
    }

    *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
    (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
    (*pAllocation)->SetUserData(this, pUserData);
    if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    {
        FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    }

    // Register it in m_pDedicatedAllocations.
    {
        VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocations);
        VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
    }

    VMA_DEBUG_LOG("    Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);

    return VK_SUCCESS;
}
    12219 
// Queries memory requirements for a buffer. When the KHR dedicated-allocation
// extension is in use, vkGetBufferMemoryRequirements2KHR is called with a
// chained VkMemoryDedicatedRequirementsKHR so the driver can report whether a
// dedicated allocation is required or preferred; otherwise the core function
// is used and both dedicated flags are reported as false.
// (Note: the `else` deliberately pairs across the #endif with the #if branch.)
void VmaAllocator_T::GetBufferMemoryRequirements(
    VkBuffer hBuffer,
    VkMemoryRequirements& memReq,
    bool& requiresDedicatedAllocation,
    bool& prefersDedicatedAllocation) const
{
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
        memReqInfo.buffer = hBuffer;

        VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };

        // Chain the dedicated-requirements struct to receive the extra info.
        VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
        memReq2.pNext = &memDedicatedReq;

        (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);

        memReq = memReq2.memoryRequirements;
        requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
        prefersDedicatedAllocation  = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    }
    else
#endif // #if VMA_DEDICATED_ALLOCATION
    {
        (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
        requiresDedicatedAllocation = false;
        prefersDedicatedAllocation  = false;
    }
}
    12251 
// Queries memory requirements for an image; mirrors
// GetBufferMemoryRequirements. With the KHR dedicated-allocation extension,
// vkGetImageMemoryRequirements2KHR is used with a chained
// VkMemoryDedicatedRequirementsKHR; otherwise the core query is used and the
// dedicated flags are reported as false.
// (Note: the `else` deliberately pairs across the #endif with the #if branch.)
void VmaAllocator_T::GetImageMemoryRequirements(
    VkImage hImage,
    VkMemoryRequirements& memReq,
    bool& requiresDedicatedAllocation,
    bool& prefersDedicatedAllocation) const
{
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
        memReqInfo.image = hImage;

        VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };

        // Chain the dedicated-requirements struct to receive the extra info.
        VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
        memReq2.pNext = &memDedicatedReq;

        (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);

        memReq = memReq2.memoryRequirements;
        requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
        prefersDedicatedAllocation  = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    }
    else
#endif // #if VMA_DEDICATED_ALLOCATION
    {
        (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
        requiresDedicatedAllocation = false;
        prefersDedicatedAllocation  = false;
    }
}
    12283 
    12284 VkResult VmaAllocator_T::AllocateMemory(
    12285  const VkMemoryRequirements& vkMemReq,
    12286  bool requiresDedicatedAllocation,
    12287  bool prefersDedicatedAllocation,
    12288  VkBuffer dedicatedBuffer,
    12289  VkImage dedicatedImage,
    12290  const VmaAllocationCreateInfo& createInfo,
    12291  VmaSuballocationType suballocType,
    12292  VmaAllocation* pAllocation)
    12293 {
    12294  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
    12295 
    12296  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
    12297  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12298  {
    12299  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
    12300  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12301  }
    12302  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    12304  {
    12305  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
    12306  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12307  }
    12308  if(requiresDedicatedAllocation)
    12309  {
    12310  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12311  {
    12312  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
    12313  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12314  }
    12315  if(createInfo.pool != VK_NULL_HANDLE)
    12316  {
    12317  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
    12318  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12319  }
    12320  }
    12321  if((createInfo.pool != VK_NULL_HANDLE) &&
    12322  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
    12323  {
    12324  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
    12325  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12326  }
    12327 
    12328  if(createInfo.pool != VK_NULL_HANDLE)
    12329  {
    12330  const VkDeviceSize alignmentForPool = VMA_MAX(
    12331  vkMemReq.alignment,
    12332  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
    12333  return createInfo.pool->m_BlockVector.Allocate(
    12334  createInfo.pool,
    12335  m_CurrentFrameIndex.load(),
    12336  vkMemReq.size,
    12337  alignmentForPool,
    12338  createInfo,
    12339  suballocType,
    12340  pAllocation);
    12341  }
    12342  else
    12343  {
    12344  // Bit mask of memory Vulkan types acceptable for this allocation.
    12345  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
    12346  uint32_t memTypeIndex = UINT32_MAX;
    12347  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    12348  if(res == VK_SUCCESS)
    12349  {
    12350  VkDeviceSize alignmentForMemType = VMA_MAX(
    12351  vkMemReq.alignment,
    12352  GetMemoryTypeMinAlignment(memTypeIndex));
    12353 
    12354  res = AllocateMemoryOfType(
    12355  vkMemReq.size,
    12356  alignmentForMemType,
    12357  requiresDedicatedAllocation || prefersDedicatedAllocation,
    12358  dedicatedBuffer,
    12359  dedicatedImage,
    12360  createInfo,
    12361  memTypeIndex,
    12362  suballocType,
    12363  pAllocation);
    12364  // Succeeded on first try.
    12365  if(res == VK_SUCCESS)
    12366  {
    12367  return res;
    12368  }
    12369  // Allocation from this memory type failed. Try other compatible memory types.
    12370  else
    12371  {
    12372  for(;;)
    12373  {
    12374  // Remove old memTypeIndex from list of possibilities.
    12375  memoryTypeBits &= ~(1u << memTypeIndex);
    12376  // Find alternative memTypeIndex.
    12377  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    12378  if(res == VK_SUCCESS)
    12379  {
    12380  alignmentForMemType = VMA_MAX(
    12381  vkMemReq.alignment,
    12382  GetMemoryTypeMinAlignment(memTypeIndex));
    12383 
    12384  res = AllocateMemoryOfType(
    12385  vkMemReq.size,
    12386  alignmentForMemType,
    12387  requiresDedicatedAllocation || prefersDedicatedAllocation,
    12388  dedicatedBuffer,
    12389  dedicatedImage,
    12390  createInfo,
    12391  memTypeIndex,
    12392  suballocType,
    12393  pAllocation);
    12394  // Allocation from this alternative memory type succeeded.
    12395  if(res == VK_SUCCESS)
    12396  {
    12397  return res;
    12398  }
    12399  // else: Allocation from this memory type failed. Try next one - next loop iteration.
    12400  }
    12401  // No other matching memory type index could be found.
    12402  else
    12403  {
    12404  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
    12405  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12406  }
    12407  }
    12408  }
    12409  }
    12410  // Can't find any single memory type maching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
    12411  else
    12412  return res;
    12413  }
    12414 }
    12415 
// Frees an allocation previously obtained from this allocator.
// The underlying memory is released only if the allocation is not lost;
// the handle object itself is always destroyed.
void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
{
    VMA_ASSERT(allocation);

    // TouchAllocation returns false for lost allocations — their memory was
    // already reclaimed elsewhere, so only bookkeeping remains to be done.
    if(TouchAllocation(allocation))
    {
        if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
        {
            // Overwrite contents with the "destroyed" pattern to surface
            // use-after-free bugs in debug configurations.
            FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
        }

        switch(allocation->GetType())
        {
        case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
            {
                // Sub-allocation: return it to the block vector it came from —
                // either its custom pool's vector or the default vector for its
                // memory type.
                VmaBlockVector* pBlockVector = VMA_NULL;
                VmaPool hPool = allocation->GetPool();
                if(hPool != VK_NULL_HANDLE)
                {
                    pBlockVector = &hPool->m_BlockVector;
                }
                else
                {
                    const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
                    pBlockVector = m_pBlockVectors[memTypeIndex];
                }
                pBlockVector->Free(allocation);
            }
            break;
        case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
            // Dedicated allocation owns its own VkDeviceMemory.
            FreeDedicatedMemory(allocation);
            break;
        default:
            VMA_ASSERT(0);
        }
    }

    // Clear user data (may invoke user-data destruction logic) and destroy the
    // handle, regardless of whether the allocation was lost.
    allocation->SetUserData(this, VMA_NULL);
    vma_delete(this, allocation);
}
    12456 
    12457 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
    12458 {
    12459  // Initialize.
    12460  InitStatInfo(pStats->total);
    12461  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
    12462  InitStatInfo(pStats->memoryType[i]);
    12463  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    12464  InitStatInfo(pStats->memoryHeap[i]);
    12465 
    12466  // Process default pools.
    12467  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    12468  {
    12469  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    12470  VMA_ASSERT(pBlockVector);
    12471  pBlockVector->AddStats(pStats);
    12472  }
    12473 
    12474  // Process custom pools.
    12475  {
    12476  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    12477  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    12478  {
    12479  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
    12480  }
    12481  }
    12482 
    12483  // Process dedicated allocations.
    12484  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    12485  {
    12486  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    12487  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    12488  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
    12489  VMA_ASSERT(pDedicatedAllocVector);
    12490  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
    12491  {
    12492  VmaStatInfo allocationStatInfo;
    12493  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
    12494  VmaAddStatInfo(pStats->total, allocationStatInfo);
    12495  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    12496  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    12497  }
    12498  }
    12499 
    12500  // Postprocess.
    12501  VmaPostprocessCalcStatInfo(pStats->total);
    12502  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
    12503  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
    12504  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
    12505  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
    12506 }
    12507 
// PCI vendor ID of AMD (4098 == 0x1002). Presumably used for vendor-specific
// behavior elsewhere in the file — confirm at the usage sites.
static const uint32_t VMA_VENDOR_ID_AMD = 4098;
    12509 
// Defragments the given allocations in three phases:
//   1. Dispatch eligible allocations to per-block-vector defragmentators.
//   2. Run defragmentation on default memory types and custom pools, bounded
//      by the limits in pDefragmentationInfo.
//   3. Destroy all defragmentators (in reverse order of creation).
// Only HOST_VISIBLE|HOST_COHERENT, block-based, non-lost allocations are
// eligible. Optional outputs: pAllocationsChanged (per-allocation flags) and
// pDefragmentationStats; both are zeroed up front if provided.
VkResult VmaAllocator_T::Defragment(
    VmaAllocation* pAllocations,
    size_t allocationCount,
    VkBool32* pAllocationsChanged,
    const VmaDefragmentationInfo* pDefragmentationInfo,
    VmaDefragmentationStats* pDefragmentationStats)
{
    // NOTE(review): this zeroes only the FIRST element of pAllocationsChanged
    // (sizeof(*ptr)), not allocationCount elements — confirm whether the
    // remaining flags are guaranteed to be written later.
    if(pAllocationsChanged != VMA_NULL)
    {
        memset(pAllocationsChanged, 0, sizeof(*pAllocationsChanged));
    }
    if(pDefragmentationStats != VMA_NULL)
    {
        memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
    }

    const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();

    // Held for the whole operation so the pool set cannot change under us.
    VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);

    const size_t poolCount = m_Pools.size();

    // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
    for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    {
        VmaAllocation hAlloc = pAllocations[allocIndex];
        VMA_ASSERT(hAlloc);
        const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
        // DedicatedAlloc cannot be defragmented.
        const VkMemoryPropertyFlags requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
        if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
            // Only HOST_VISIBLE and HOST_COHERENT memory types can be defragmented.
            ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags) &&
            // Lost allocation cannot be defragmented.
            (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
        {
            VmaBlockVector* pAllocBlockVector = VMA_NULL;

            const VmaPool hAllocPool = hAlloc->GetPool();
            // This allocation belongs to custom pool.
            if(hAllocPool != VK_NULL_HANDLE)
            {
                // Pools with linear or buddy algorithm are not defragmented.
                if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
                {
                    pAllocBlockVector = &hAllocPool->m_BlockVector;
                }
            }
            // This allocation belongs to general pool.
            else
            {
                pAllocBlockVector = m_pBlockVectors[memTypeIndex];
            }

            if(pAllocBlockVector != VMA_NULL)
            {
                // Lazily creates the defragmentator for this block vector.
                VmaDefragmentator* const pDefragmentator =
                    pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
                VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
                    &pAllocationsChanged[allocIndex] : VMA_NULL;
                pDefragmentator->AddAllocation(hAlloc, pChanged);
            }
        }
    }

    VkResult result = VK_SUCCESS;

    // ======== Main processing.

    // NOTE(review): SIZE_MAX used as "unlimited" for a VkDeviceSize (uint64) —
    // on a 32-bit target this is a smaller limit than UINT64_MAX; confirm intent.
    VkDeviceSize maxBytesToMove = SIZE_MAX;
    uint32_t maxAllocationsToMove = UINT32_MAX;
    if(pDefragmentationInfo != VMA_NULL)
    {
        maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
        maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
    }

    // Process standard memory. Stops at the first failure (result != VK_SUCCESS).
    for(uint32_t memTypeIndex = 0;
        (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
        ++memTypeIndex)
    {
        // Only HOST_VISIBLE memory types can be defragmented.
        if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
        {
            result = m_pBlockVectors[memTypeIndex]->Defragment(
                pDefragmentationStats,
                maxBytesToMove,
                maxAllocationsToMove);
        }
    }

    // Process custom pools.
    for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
    {
        result = m_Pools[poolIndex]->m_BlockVector.Defragment(
            pDefragmentationStats,
            maxBytesToMove,
            maxAllocationsToMove);
    }

    // ======== Destroy defragmentators.
    // Runs even if defragmentation failed partway, in reverse order.

    // Process custom pools.
    for(size_t poolIndex = poolCount; poolIndex--; )
    {
        m_Pools[poolIndex]->m_BlockVector.DestroyDefragmentator();
    }

    // Process standard memory.
    for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
    {
        if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
        {
            m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
        }
    }

    return result;
}
    12630 
// Fills *pAllocationInfo for hAllocation. For lost-capable allocations this
// also acts as a "touch": the last-use frame index is advanced to the current
// frame via a compare-exchange loop, so the allocation is not considered
// stale. If the allocation is already lost, placeholder values are returned.
void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
{
    if(hAllocation->CanBecomeLost())
    {
        /*
        Warning: This is a carefully designed algorithm.
        Do not modify unless you really know what you're doing :)
        */
        // Lock-free update: retry until the last-use frame index is observed
        // as either LOST or equal to the current frame.
        const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                // Allocation lost: report only size and user data.
                pAllocationInfo->memoryType = UINT32_MAX;
                pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
                pAllocationInfo->offset = 0;
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                // Up to date: report real location. pMappedData is always null
                // here because lost-capable allocations cannot be mapped.
                pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
                pAllocationInfo->deviceMemory = hAllocation->GetMemory();
                pAllocationInfo->offset = hAllocation->GetOffset();
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else // Last use time earlier than current time.
            {
                // Try to bump last-use to the current frame; on CAS failure
                // localLastUseFrameIndex is refreshed and the loop retries.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // Stats builds still track last-use frame for non-lost-capable
        // allocations; same CAS pattern, but LOST must never be observed.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
        pAllocationInfo->deviceMemory = hAllocation->GetMemory();
        pAllocationInfo->offset = hAllocation->GetOffset();
        pAllocationInfo->size = hAllocation->GetSize();
        pAllocationInfo->pMappedData = hAllocation->GetMappedData();
        pAllocationInfo->pUserData = hAllocation->GetUserData();
    }
}
    12702 
// Marks hAllocation as used in the current frame. Returns false if the
// allocation is lost, true otherwise. This is a stripped-down version of
// VmaAllocator_T::GetAllocationInfo — same compare-exchange loop, no output.
bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
{
    // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
    if(hAllocation->CanBecomeLost())
    {
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        // Retry until last-use is observed as LOST (return false) or equal to
        // the current frame (return true).
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                return false;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                return true;
            }
            else // Last use time earlier than current time.
            {
                // CAS failure refreshes localLastUseFrameIndex; loop retries.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // Stats builds keep last-use frame current even for allocations that
        // cannot become lost; LOST must never be observed here.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        return true;
    }
}
    12754 
    12755 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
    12756 {
    12757  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
    12758 
    12759  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
    12760 
    12761  if(newCreateInfo.maxBlockCount == 0)
    12762  {
    12763  newCreateInfo.maxBlockCount = SIZE_MAX;
    12764  }
    12765  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
    12766  {
    12767  return VK_ERROR_INITIALIZATION_FAILED;
    12768  }
    12769 
    12770  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
    12771 
    12772  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
    12773 
    12774  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
    12775  if(res != VK_SUCCESS)
    12776  {
    12777  vma_delete(this, *pPool);
    12778  *pPool = VMA_NULL;
    12779  return res;
    12780  }
    12781 
    12782  // Add to m_Pools.
    12783  {
    12784  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    12785  (*pPool)->SetId(m_NextPoolId++);
    12786  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
    12787  }
    12788 
    12789  return VK_SUCCESS;
    12790 }
    12791 
    12792 void VmaAllocator_T::DestroyPool(VmaPool pool)
    12793 {
    12794  // Remove from m_Pools.
    12795  {
    12796  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    12797  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
    12798  VMA_ASSERT(success && "Pool not found in Allocator.");
    12799  }
    12800 
    12801  vma_delete(this, pool);
    12802 }
    12803 
// Thin forwarder: statistics for a custom pool come from its block vector.
void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
{
    pool->m_BlockVector.GetPoolStats(pPoolStats);
}
    12808 
// Publishes the application's current frame index (atomic store); used by the
// lost-allocation tracking machinery.
void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
{
    m_CurrentFrameIndex.store(frameIndex);
}
    12813 
// Forwards to the pool's block vector to mark eligible allocations as lost,
// relative to the current frame index. pLostAllocationCount (may be null per
// the block vector's contract — confirm) receives how many were lost.
void VmaAllocator_T::MakePoolAllocationsLost(
    VmaPool hPool,
    size_t* pLostAllocationCount)
{
    hPool->m_BlockVector.MakePoolAllocationsLost(
        m_CurrentFrameIndex.load(),
        pLostAllocationCount);
}
    12822 
// Thin forwarder: corruption detection for a custom pool is implemented by
// its block vector.
VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
{
    return hPool->m_BlockVector.CheckCorruption();
}
    12827 
    12828 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
    12829 {
    12830  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
    12831 
    12832  // Process default pools.
    12833  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    12834  {
    12835  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
    12836  {
    12837  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    12838  VMA_ASSERT(pBlockVector);
    12839  VkResult localRes = pBlockVector->CheckCorruption();
    12840  switch(localRes)
    12841  {
    12842  case VK_ERROR_FEATURE_NOT_PRESENT:
    12843  break;
    12844  case VK_SUCCESS:
    12845  finalRes = VK_SUCCESS;
    12846  break;
    12847  default:
    12848  return localRes;
    12849  }
    12850  }
    12851  }
    12852 
    12853  // Process custom pools.
    12854  {
    12855  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    12856  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    12857  {
    12858  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
    12859  {
    12860  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
    12861  switch(localRes)
    12862  {
    12863  case VK_ERROR_FEATURE_NOT_PRESENT:
    12864  break;
    12865  case VK_SUCCESS:
    12866  finalRes = VK_SUCCESS;
    12867  break;
    12868  default:
    12869  return localRes;
    12870  }
    12871  }
    12872  }
    12873  }
    12874 
    12875  return finalRes;
    12876 }
    12877 
// Creates an allocation handle that is born lost (frame index
// VMA_FRAME_INDEX_LOST, no user-data-is-string flag, InitLost state);
// it owns no device memory.
void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
{
    *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
    (*pAllocation)->InitLost();
}
    12883 
// Calls vkAllocateMemory, enforcing the optional per-heap size limit and
// invoking the user's allocate callback on success.
VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
{
    const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);

    VkResult res;
    // VK_WHOLE_SIZE in m_HeapSizeLimit means "no limit configured for this heap".
    if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    {
        // Budget check and decrement must be atomic with respect to other
        // allocations/frees on the same heap, hence the mutex scope covers both.
        VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
        if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
        {
            res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
            if(res == VK_SUCCESS)
            {
                // Charge the allocation against the remaining heap budget.
                m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
            }
        }
        else
        {
            // Would exceed the configured heap limit.
            res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
        }
    }
    else
    {
        res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    }

    // Notify user callback only on successful allocation.
    if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
    {
        (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
    }

    return res;
}
    12917 
// Calls vkFreeMemory, invoking the user's free callback first (while the
// memory handle is still valid) and returning the freed size to the per-heap
// budget if a limit is configured.
void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
{
    // User callback runs before the memory is actually freed.
    if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
    {
        (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
    }

    (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());

    // Refund the heap budget if this heap has a configured size limit.
    const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
    if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    {
        VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
        m_HeapSizeLimit[heapIndex] += size;
    }
}
    12934 
// Maps the allocation's memory and returns a CPU pointer in *ppData.
// Lost-capable allocations cannot be mapped — their backing memory may be
// reclaimed at any time.
VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
{
    if(hAllocation->CanBecomeLost())
    {
        return VK_ERROR_MEMORY_MAP_FAILED;
    }

    switch(hAllocation->GetType())
    {
    case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
        {
            // Request one mapping of the owning block, then offset the returned
            // pointer to this sub-allocation's position within the block.
            VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
            char *pBytes = VMA_NULL;
            VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
            if(res == VK_SUCCESS)
            {
                *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
                // Record the map on the allocation itself (paired with
                // BlockAllocUnmap in Unmap).
                hAllocation->BlockAllocMap();
            }
            return res;
        }
    case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
        // Dedicated allocations map their own VkDeviceMemory directly.
        return hAllocation->DedicatedAllocMap(this, ppData);
    default:
        VMA_ASSERT(0);
        return VK_ERROR_MEMORY_MAP_FAILED;
    }
}
    12963 
// Reverses one Map() call on the allocation.
void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
{
    switch(hAllocation->GetType())
    {
    case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
        {
            VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
            // Undo the allocation-level map record first, then release one
            // mapping of the owning block (mirrors the order in Map()).
            hAllocation->BlockAllocUnmap();
            pBlock->Unmap(this, 1);
        }
        break;
    case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
        hAllocation->DedicatedAllocUnmap(this);
        break;
    default:
        VMA_ASSERT(0);
    }
}
    12982 
    12983 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
    12984 {
    12985  VkResult res = VK_SUCCESS;
    12986  switch(hAllocation->GetType())
    12987  {
    12988  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    12989  res = GetVulkanFunctions().vkBindBufferMemory(
    12990  m_hDevice,
    12991  hBuffer,
    12992  hAllocation->GetMemory(),
    12993  0); //memoryOffset
    12994  break;
    12995  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    12996  {
    12997  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    12998  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
    12999  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
    13000  break;
    13001  }
    13002  default:
    13003  VMA_ASSERT(0);
    13004  }
    13005  return res;
    13006 }
    13007 
    13008 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
    13009 {
    13010  VkResult res = VK_SUCCESS;
    13011  switch(hAllocation->GetType())
    13012  {
    13013  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    13014  res = GetVulkanFunctions().vkBindImageMemory(
    13015  m_hDevice,
    13016  hImage,
    13017  hAllocation->GetMemory(),
    13018  0); //memoryOffset
    13019  break;
    13020  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13021  {
    13022  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    13023  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
    13024  res = pBlock->BindImageMemory(this, hAllocation, hImage);
    13025  break;
    13026  }
    13027  default:
    13028  VMA_ASSERT(0);
    13029  }
    13030  return res;
    13031 }
    13032 
// Flushes or invalidates a sub-range of the allocation on non-coherent memory
// types. The range is expanded to nonCoherentAtomSize boundaries and, for
// block sub-allocations, translated into block coordinates and clamped to the
// block size. No-op for coherent memory or size == 0.
void VmaAllocator_T::FlushOrInvalidateAllocation(
    VmaAllocation hAllocation,
    VkDeviceSize offset, VkDeviceSize size,
    VMA_CACHE_OPERATION op)
{
    const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
    if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
    {
        const VkDeviceSize allocationSize = hAllocation->GetSize();
        VMA_ASSERT(offset <= allocationSize);

        const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;

        VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
        memRange.memory = hAllocation->GetMemory();

        switch(hAllocation->GetType())
        {
        case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
            // Align the start down to an atom boundary.
            memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                memRange.size = allocationSize - memRange.offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
                // Grow the size to cover the original [offset, offset+size)
                // after the start moved down, rounded up to an atom boundary,
                // but never past the end of the allocation.
                memRange.size = VMA_MIN(
                    VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
                    allocationSize - memRange.offset);
            }
            break;

        case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
            {
                // 1. Still within this allocation.
                memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
                if(size == VK_WHOLE_SIZE)
                {
                    size = allocationSize - offset;
                }
                else
                {
                    VMA_ASSERT(offset + size <= allocationSize);
                }
                memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);

                // 2. Adjust to whole block: translate the allocation-relative
                // range into block-relative coordinates and clamp at block end.
                const VkDeviceSize allocationOffset = hAllocation->GetOffset();
                // Sub-allocation starts must already be atom-aligned for the
                // aligned-down offset above to stay aligned after translation.
                VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
                const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
                memRange.offset += allocationOffset;
                memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);

                break;
            }

        default:
            VMA_ASSERT(0);
        }

        switch(op)
        {
        case VMA_CACHE_FLUSH:
            (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        case VMA_CACHE_INVALIDATE:
            (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        default:
            VMA_ASSERT(0);
        }
    }
    // else: Just ignore this call.
}
    13108 
// Releases a dedicated allocation: removes it from the per-memory-type
// dedicated-allocations registry (under its mutex), then frees the underlying
// VkDeviceMemory. The VmaAllocation handle itself is destroyed by the caller
// (FreeMemory).
void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
{
    VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);

    const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    {
        VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocations);
        bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
        VMA_ASSERT(success);
    }

    VkDeviceMemory hMemory = allocation->GetMemory();

    /*
    There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
    before vkFreeMemory.

    if(allocation->GetMappedData() != VMA_NULL)
    {
        (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    }
    */

    FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);

    VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
}
    13138 
    13139 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
    13140 {
    13141  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
    13142  !hAllocation->CanBecomeLost() &&
    13143  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    13144  {
    13145  void* pData = VMA_NULL;
    13146  VkResult res = Map(hAllocation, &pData);
    13147  if(res == VK_SUCCESS)
    13148  {
    13149  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
    13150  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
    13151  Unmap(hAllocation);
    13152  }
    13153  else
    13154  {
    13155  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
    13156  }
    13157  }
    13158 }
    13159 
    13160 #if VMA_STATS_STRING_ENABLED
    13161 
// Writes detailed allocation maps into an already-open JSON object.
// Emits up to three optional sections: "DedicatedAllocations", "DefaultPools"
// and "Pools" (custom pools). Each section is opened lazily, only when there
// is at least one non-empty entry to report, so empty sections never appear.
void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
{
    bool dedicatedAllocationsStarted = false;
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        // Hold the per-type mutex while reading that type's dedicated list.
        VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocVector);
        if(pDedicatedAllocVector->empty() == false)
        {
            // Open the "DedicatedAllocations" object on the first non-empty type.
            if(dedicatedAllocationsStarted == false)
            {
                dedicatedAllocationsStarted = true;
                json.WriteString("DedicatedAllocations");
                json.BeginObject();
            }

            // Key is "Type <memTypeIndex>"; value is an array of allocations.
            json.BeginString("Type ");
            json.ContinueString(memTypeIndex);
            json.EndString();

            json.BeginArray();

            for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
            {
                json.BeginObject(true);
                const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
                hAlloc->PrintParameters(json);
                json.EndObject();
            }

            json.EndArray();
        }
    }
    if(dedicatedAllocationsStarted)
    {
        json.EndObject();
    }

    // Default (per-memory-type) block vectors.
    {
        bool allocationsStarted = false;
        for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
        {
            if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
            {
                // Open the "DefaultPools" object on the first non-empty type.
                if(allocationsStarted == false)
                {
                    allocationsStarted = true;
                    json.WriteString("DefaultPools");
                    json.BeginObject();
                }

                json.BeginString("Type ");
                json.ContinueString(memTypeIndex);
                json.EndString();

                m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
            }
        }
        if(allocationsStarted)
        {
            json.EndObject();
        }
    }

    // Custom pools
    {
        VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
        const size_t poolCount = m_Pools.size();
        if(poolCount > 0)
        {
            json.WriteString("Pools");
            json.BeginObject();
            for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
            {
                // Each custom pool is keyed by its numeric id.
                json.BeginString();
                json.ContinueString(m_Pools[poolIndex]->GetId());
                json.EndString();

                m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
            }
            json.EndObject();
        }
    }
}
    13247 
    13248 #endif // #if VMA_STATS_STRING_ENABLED
    13249 
////////////////////////////////////////////////////////////////////////////////
// Public interface
    13252 
    13253 VkResult vmaCreateAllocator(
    13254  const VmaAllocatorCreateInfo* pCreateInfo,
    13255  VmaAllocator* pAllocator)
    13256 {
    13257  VMA_ASSERT(pCreateInfo && pAllocator);
    13258  VMA_DEBUG_LOG("vmaCreateAllocator");
    13259  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
    13260  return (*pAllocator)->Init(pCreateInfo);
    13261 }
    13262 
    13263 void vmaDestroyAllocator(
    13264  VmaAllocator allocator)
    13265 {
    13266  if(allocator != VK_NULL_HANDLE)
    13267  {
    13268  VMA_DEBUG_LOG("vmaDestroyAllocator");
    13269  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
    13270  vma_delete(&allocationCallbacks, allocator);
    13271  }
    13272 }
    13273 
    13275  VmaAllocator allocator,
    13276  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
    13277 {
    13278  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
    13279  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
    13280 }
    13281 
    13283  VmaAllocator allocator,
    13284  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
    13285 {
    13286  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
    13287  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
    13288 }
    13289 
    13291  VmaAllocator allocator,
    13292  uint32_t memoryTypeIndex,
    13293  VkMemoryPropertyFlags* pFlags)
    13294 {
    13295  VMA_ASSERT(allocator && pFlags);
    13296  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
    13297  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
    13298 }
    13299 
    13301  VmaAllocator allocator,
    13302  uint32_t frameIndex)
    13303 {
    13304  VMA_ASSERT(allocator);
    13305  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
    13306 
    13307  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13308 
    13309  allocator->SetCurrentFrameIndex(frameIndex);
    13310 }
    13311 
    13312 void vmaCalculateStats(
    13313  VmaAllocator allocator,
    13314  VmaStats* pStats)
    13315 {
    13316  VMA_ASSERT(allocator && pStats);
    13317  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13318  allocator->CalculateStats(pStats);
    13319 }
    13320 
    13321 #if VMA_STATS_STRING_ENABLED
    13322 
// Builds a JSON string describing the allocator's current state: total stats,
// per-heap size/flags/stats, per-type flags/stats, and (when detailedMap is
// VK_TRUE) a detailed map of every allocation. The returned string is
// allocated through the allocator and must be released with vmaFreeStatsString.
void vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap)
{
    VMA_ASSERT(allocator && ppStatsString);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VmaStringBuilder sb(allocator);
    {
        // Inner scope: the JSON writer must be destroyed (output finalized)
        // before the string is copied out of the builder below.
        VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
        json.BeginObject();

        VmaStats stats;
        allocator->CalculateStats(&stats);

        json.WriteString("Total");
        VmaPrintStatInfo(json, stats.total);

        // One nested object per memory heap, keyed "Heap <index>".
        for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
        {
            json.BeginString("Heap ");
            json.ContinueString(heapIndex);
            json.EndString();
            json.BeginObject();

            json.WriteString("Size");
            json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);

            json.WriteString("Flags");
            json.BeginArray(true);
            if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
            {
                json.WriteString("DEVICE_LOCAL");
            }
            json.EndArray();

            // Heap-level stats are emitted only when the heap holds blocks.
            if(stats.memoryHeap[heapIndex].blockCount > 0)
            {
                json.WriteString("Stats");
                VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
            }

            // Nested objects for every memory type belonging to this heap.
            for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
            {
                if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
                {
                    json.BeginString("Type ");
                    json.ContinueString(typeIndex);
                    json.EndString();

                    json.BeginObject();

                    // Decode the property flags into human-readable strings.
                    json.WriteString("Flags");
                    json.BeginArray(true);
                    VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
                    if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
                    {
                        json.WriteString("DEVICE_LOCAL");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
                    {
                        json.WriteString("HOST_VISIBLE");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
                    {
                        json.WriteString("HOST_COHERENT");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
                    {
                        json.WriteString("HOST_CACHED");
                    }
                    if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
                    {
                        json.WriteString("LAZILY_ALLOCATED");
                    }
                    json.EndArray();

                    if(stats.memoryType[typeIndex].blockCount > 0)
                    {
                        json.WriteString("Stats");
                        VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
                    }

                    json.EndObject();
                }
            }

            json.EndObject();
        }
        if(detailedMap == VK_TRUE)
        {
            allocator->PrintDetailedMap(json);
        }

        json.EndObject();
    }

    // Copy the builder's contents into a NUL-terminated string owned by the
    // caller (freed via vmaFreeStatsString).
    const size_t len = sb.GetLength();
    char* const pChars = vma_new_array(allocator, char, len + 1);
    if(len > 0)
    {
        memcpy(pChars, sb.GetData(), len);
    }
    pChars[len] = '\0';
    *ppStatsString = pChars;
}
    13430 
    13431 void vmaFreeStatsString(
    13432  VmaAllocator allocator,
    13433  char* pStatsString)
    13434 {
    13435  if(pStatsString != VMA_NULL)
    13436  {
    13437  VMA_ASSERT(allocator);
    13438  size_t len = strlen(pStatsString);
    13439  vma_delete_array(allocator, pStatsString, len + 1);
    13440  }
    13441 }
    13442 
    13443 #endif // #if VMA_STATS_STRING_ENABLED
    13444 
    13445 /*
    13446 This function is not protected by any mutex because it just reads immutable data.
    13447 */
    13448 VkResult vmaFindMemoryTypeIndex(
    13449  VmaAllocator allocator,
    13450  uint32_t memoryTypeBits,
    13451  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    13452  uint32_t* pMemoryTypeIndex)
    13453 {
    13454  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    13455  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    13456  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    13457 
    13458  if(pAllocationCreateInfo->memoryTypeBits != 0)
    13459  {
    13460  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
    13461  }
    13462 
    13463  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
    13464  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
    13465 
    13466  const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    13467  if(mapped)
    13468  {
    13469  preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    13470  }
    13471 
    13472  // Convert usage to requiredFlags and preferredFlags.
    13473  switch(pAllocationCreateInfo->usage)
    13474  {
    13476  break;
    13478  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    13479  {
    13480  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    13481  }
    13482  break;
    13484  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    13485  break;
    13487  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    13488  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    13489  {
    13490  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    13491  }
    13492  break;
    13494  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    13495  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
    13496  break;
    13497  default:
    13498  break;
    13499  }
    13500 
    13501  *pMemoryTypeIndex = UINT32_MAX;
    13502  uint32_t minCost = UINT32_MAX;
    13503  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
    13504  memTypeIndex < allocator->GetMemoryTypeCount();
    13505  ++memTypeIndex, memTypeBit <<= 1)
    13506  {
    13507  // This memory type is acceptable according to memoryTypeBits bitmask.
    13508  if((memTypeBit & memoryTypeBits) != 0)
    13509  {
    13510  const VkMemoryPropertyFlags currFlags =
    13511  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
    13512  // This memory type contains requiredFlags.
    13513  if((requiredFlags & ~currFlags) == 0)
    13514  {
    13515  // Calculate cost as number of bits from preferredFlags not present in this memory type.
    13516  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
    13517  // Remember memory type with lowest cost.
    13518  if(currCost < minCost)
    13519  {
    13520  *pMemoryTypeIndex = memTypeIndex;
    13521  if(currCost == 0)
    13522  {
    13523  return VK_SUCCESS;
    13524  }
    13525  minCost = currCost;
    13526  }
    13527  }
    13528  }
    13529  }
    13530  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
    13531 }
    13532 
    13534  VmaAllocator allocator,
    13535  const VkBufferCreateInfo* pBufferCreateInfo,
    13536  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    13537  uint32_t* pMemoryTypeIndex)
    13538 {
    13539  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    13540  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
    13541  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    13542  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    13543 
    13544  const VkDevice hDev = allocator->m_hDevice;
    13545  VkBuffer hBuffer = VK_NULL_HANDLE;
    13546  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
    13547  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
    13548  if(res == VK_SUCCESS)
    13549  {
    13550  VkMemoryRequirements memReq = {};
    13551  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
    13552  hDev, hBuffer, &memReq);
    13553 
    13554  res = vmaFindMemoryTypeIndex(
    13555  allocator,
    13556  memReq.memoryTypeBits,
    13557  pAllocationCreateInfo,
    13558  pMemoryTypeIndex);
    13559 
    13560  allocator->GetVulkanFunctions().vkDestroyBuffer(
    13561  hDev, hBuffer, allocator->GetAllocationCallbacks());
    13562  }
    13563  return res;
    13564 }
    13565 
    13567  VmaAllocator allocator,
    13568  const VkImageCreateInfo* pImageCreateInfo,
    13569  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    13570  uint32_t* pMemoryTypeIndex)
    13571 {
    13572  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    13573  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
    13574  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    13575  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    13576 
    13577  const VkDevice hDev = allocator->m_hDevice;
    13578  VkImage hImage = VK_NULL_HANDLE;
    13579  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
    13580  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
    13581  if(res == VK_SUCCESS)
    13582  {
    13583  VkMemoryRequirements memReq = {};
    13584  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
    13585  hDev, hImage, &memReq);
    13586 
    13587  res = vmaFindMemoryTypeIndex(
    13588  allocator,
    13589  memReq.memoryTypeBits,
    13590  pAllocationCreateInfo,
    13591  pMemoryTypeIndex);
    13592 
    13593  allocator->GetVulkanFunctions().vkDestroyImage(
    13594  hDev, hImage, allocator->GetAllocationCallbacks());
    13595  }
    13596  return res;
    13597 }
    13598 
    13599 VkResult vmaCreatePool(
    13600  VmaAllocator allocator,
    13601  const VmaPoolCreateInfo* pCreateInfo,
    13602  VmaPool* pPool)
    13603 {
    13604  VMA_ASSERT(allocator && pCreateInfo && pPool);
    13605 
    13606  VMA_DEBUG_LOG("vmaCreatePool");
    13607 
    13608  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13609 
    13610  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
    13611 
    13612 #if VMA_RECORDING_ENABLED
    13613  if(allocator->GetRecorder() != VMA_NULL)
    13614  {
    13615  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
    13616  }
    13617 #endif
    13618 
    13619  return res;
    13620 }
    13621 
    13622 void vmaDestroyPool(
    13623  VmaAllocator allocator,
    13624  VmaPool pool)
    13625 {
    13626  VMA_ASSERT(allocator);
    13627 
    13628  if(pool == VK_NULL_HANDLE)
    13629  {
    13630  return;
    13631  }
    13632 
    13633  VMA_DEBUG_LOG("vmaDestroyPool");
    13634 
    13635  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13636 
    13637 #if VMA_RECORDING_ENABLED
    13638  if(allocator->GetRecorder() != VMA_NULL)
    13639  {
    13640  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
    13641  }
    13642 #endif
    13643 
    13644  allocator->DestroyPool(pool);
    13645 }
    13646 
    13647 void vmaGetPoolStats(
    13648  VmaAllocator allocator,
    13649  VmaPool pool,
    13650  VmaPoolStats* pPoolStats)
    13651 {
    13652  VMA_ASSERT(allocator && pool && pPoolStats);
    13653 
    13654  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13655 
    13656  allocator->GetPoolStats(pool, pPoolStats);
    13657 }
    13658 
    13660  VmaAllocator allocator,
    13661  VmaPool pool,
    13662  size_t* pLostAllocationCount)
    13663 {
    13664  VMA_ASSERT(allocator && pool);
    13665 
    13666  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13667 
    13668 #if VMA_RECORDING_ENABLED
    13669  if(allocator->GetRecorder() != VMA_NULL)
    13670  {
    13671  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
    13672  }
    13673 #endif
    13674 
    13675  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
    13676 }
    13677 
    13678 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
    13679 {
    13680  VMA_ASSERT(allocator && pool);
    13681 
    13682  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13683 
    13684  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
    13685 
    13686  return allocator->CheckPoolCorruption(pool);
    13687 }
    13688 
    13689 VkResult vmaAllocateMemory(
    13690  VmaAllocator allocator,
    13691  const VkMemoryRequirements* pVkMemoryRequirements,
    13692  const VmaAllocationCreateInfo* pCreateInfo,
    13693  VmaAllocation* pAllocation,
    13694  VmaAllocationInfo* pAllocationInfo)
    13695 {
    13696  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
    13697 
    13698  VMA_DEBUG_LOG("vmaAllocateMemory");
    13699 
    13700  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13701 
    13702  VkResult result = allocator->AllocateMemory(
    13703  *pVkMemoryRequirements,
    13704  false, // requiresDedicatedAllocation
    13705  false, // prefersDedicatedAllocation
    13706  VK_NULL_HANDLE, // dedicatedBuffer
    13707  VK_NULL_HANDLE, // dedicatedImage
    13708  *pCreateInfo,
    13709  VMA_SUBALLOCATION_TYPE_UNKNOWN,
    13710  pAllocation);
    13711 
    13712 #if VMA_RECORDING_ENABLED
    13713  if(allocator->GetRecorder() != VMA_NULL)
    13714  {
    13715  allocator->GetRecorder()->RecordAllocateMemory(
    13716  allocator->GetCurrentFrameIndex(),
    13717  *pVkMemoryRequirements,
    13718  *pCreateInfo,
    13719  *pAllocation);
    13720  }
    13721 #endif
    13722 
    13723  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
    13724  {
    13725  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    13726  }
    13727 
    13728  return result;
    13729 }
    13730 
    13732  VmaAllocator allocator,
    13733  VkBuffer buffer,
    13734  const VmaAllocationCreateInfo* pCreateInfo,
    13735  VmaAllocation* pAllocation,
    13736  VmaAllocationInfo* pAllocationInfo)
    13737 {
    13738  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    13739 
    13740  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
    13741 
    13742  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13743 
    13744  VkMemoryRequirements vkMemReq = {};
    13745  bool requiresDedicatedAllocation = false;
    13746  bool prefersDedicatedAllocation = false;
    13747  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
    13748  requiresDedicatedAllocation,
    13749  prefersDedicatedAllocation);
    13750 
    13751  VkResult result = allocator->AllocateMemory(
    13752  vkMemReq,
    13753  requiresDedicatedAllocation,
    13754  prefersDedicatedAllocation,
    13755  buffer, // dedicatedBuffer
    13756  VK_NULL_HANDLE, // dedicatedImage
    13757  *pCreateInfo,
    13758  VMA_SUBALLOCATION_TYPE_BUFFER,
    13759  pAllocation);
    13760 
    13761 #if VMA_RECORDING_ENABLED
    13762  if(allocator->GetRecorder() != VMA_NULL)
    13763  {
    13764  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
    13765  allocator->GetCurrentFrameIndex(),
    13766  vkMemReq,
    13767  requiresDedicatedAllocation,
    13768  prefersDedicatedAllocation,
    13769  *pCreateInfo,
    13770  *pAllocation);
    13771  }
    13772 #endif
    13773 
    13774  if(pAllocationInfo && result == VK_SUCCESS)
    13775  {
    13776  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    13777  }
    13778 
    13779  return result;
    13780 }
    13781 
    13782 VkResult vmaAllocateMemoryForImage(
    13783  VmaAllocator allocator,
    13784  VkImage image,
    13785  const VmaAllocationCreateInfo* pCreateInfo,
    13786  VmaAllocation* pAllocation,
    13787  VmaAllocationInfo* pAllocationInfo)
    13788 {
    13789  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    13790 
    13791  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
    13792 
    13793  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13794 
    13795  VkMemoryRequirements vkMemReq = {};
    13796  bool requiresDedicatedAllocation = false;
    13797  bool prefersDedicatedAllocation = false;
    13798  allocator->GetImageMemoryRequirements(image, vkMemReq,
    13799  requiresDedicatedAllocation, prefersDedicatedAllocation);
    13800 
    13801  VkResult result = allocator->AllocateMemory(
    13802  vkMemReq,
    13803  requiresDedicatedAllocation,
    13804  prefersDedicatedAllocation,
    13805  VK_NULL_HANDLE, // dedicatedBuffer
    13806  image, // dedicatedImage
    13807  *pCreateInfo,
    13808  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
    13809  pAllocation);
    13810 
    13811 #if VMA_RECORDING_ENABLED
    13812  if(allocator->GetRecorder() != VMA_NULL)
    13813  {
    13814  allocator->GetRecorder()->RecordAllocateMemoryForImage(
    13815  allocator->GetCurrentFrameIndex(),
    13816  vkMemReq,
    13817  requiresDedicatedAllocation,
    13818  prefersDedicatedAllocation,
    13819  *pCreateInfo,
    13820  *pAllocation);
    13821  }
    13822 #endif
    13823 
    13824  if(pAllocationInfo && result == VK_SUCCESS)
    13825  {
    13826  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    13827  }
    13828 
    13829  return result;
    13830 }
    13831 
    13832 void vmaFreeMemory(
    13833  VmaAllocator allocator,
    13834  VmaAllocation allocation)
    13835 {
    13836  VMA_ASSERT(allocator);
    13837 
    13838  if(allocation == VK_NULL_HANDLE)
    13839  {
    13840  return;
    13841  }
    13842 
    13843  VMA_DEBUG_LOG("vmaFreeMemory");
    13844 
    13845  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13846 
    13847 #if VMA_RECORDING_ENABLED
    13848  if(allocator->GetRecorder() != VMA_NULL)
    13849  {
    13850  allocator->GetRecorder()->RecordFreeMemory(
    13851  allocator->GetCurrentFrameIndex(),
    13852  allocation);
    13853  }
    13854 #endif
    13855 
    13856  allocator->FreeMemory(allocation);
    13857 }
    13858 
    13860  VmaAllocator allocator,
    13861  VmaAllocation allocation,
    13862  VmaAllocationInfo* pAllocationInfo)
    13863 {
    13864  VMA_ASSERT(allocator && allocation && pAllocationInfo);
    13865 
    13866  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13867 
    13868 #if VMA_RECORDING_ENABLED
    13869  if(allocator->GetRecorder() != VMA_NULL)
    13870  {
    13871  allocator->GetRecorder()->RecordGetAllocationInfo(
    13872  allocator->GetCurrentFrameIndex(),
    13873  allocation);
    13874  }
    13875 #endif
    13876 
    13877  allocator->GetAllocationInfo(allocation, pAllocationInfo);
    13878 }
    13879 
    13880 VkBool32 vmaTouchAllocation(
    13881  VmaAllocator allocator,
    13882  VmaAllocation allocation)
    13883 {
    13884  VMA_ASSERT(allocator && allocation);
    13885 
    13886  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13887 
    13888 #if VMA_RECORDING_ENABLED
    13889  if(allocator->GetRecorder() != VMA_NULL)
    13890  {
    13891  allocator->GetRecorder()->RecordTouchAllocation(
    13892  allocator->GetCurrentFrameIndex(),
    13893  allocation);
    13894  }
    13895 #endif
    13896 
    13897  return allocator->TouchAllocation(allocation);
    13898 }
    13899 
    13901  VmaAllocator allocator,
    13902  VmaAllocation allocation,
    13903  void* pUserData)
    13904 {
    13905  VMA_ASSERT(allocator && allocation);
    13906 
    13907  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13908 
    13909  allocation->SetUserData(allocator, pUserData);
    13910 
    13911 #if VMA_RECORDING_ENABLED
    13912  if(allocator->GetRecorder() != VMA_NULL)
    13913  {
    13914  allocator->GetRecorder()->RecordSetAllocationUserData(
    13915  allocator->GetCurrentFrameIndex(),
    13916  allocation,
    13917  pUserData);
    13918  }
    13919 #endif
    13920 }
    13921 
    13923  VmaAllocator allocator,
    13924  VmaAllocation* pAllocation)
    13925 {
    13926  VMA_ASSERT(allocator && pAllocation);
    13927 
    13928  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
    13929 
    13930  allocator->CreateLostAllocation(pAllocation);
    13931 
    13932 #if VMA_RECORDING_ENABLED
    13933  if(allocator->GetRecorder() != VMA_NULL)
    13934  {
    13935  allocator->GetRecorder()->RecordCreateLostAllocation(
    13936  allocator->GetCurrentFrameIndex(),
    13937  *pAllocation);
    13938  }
    13939 #endif
    13940 }
    13941 
    13942 VkResult vmaMapMemory(
    13943  VmaAllocator allocator,
    13944  VmaAllocation allocation,
    13945  void** ppData)
    13946 {
    13947  VMA_ASSERT(allocator && allocation && ppData);
    13948 
    13949  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13950 
    13951  VkResult res = allocator->Map(allocation, ppData);
    13952 
    13953 #if VMA_RECORDING_ENABLED
    13954  if(allocator->GetRecorder() != VMA_NULL)
    13955  {
    13956  allocator->GetRecorder()->RecordMapMemory(
    13957  allocator->GetCurrentFrameIndex(),
    13958  allocation);
    13959  }
    13960 #endif
    13961 
    13962  return res;
    13963 }
    13964 
    13965 void vmaUnmapMemory(
    13966  VmaAllocator allocator,
    13967  VmaAllocation allocation)
    13968 {
    13969  VMA_ASSERT(allocator && allocation);
    13970 
    13971  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13972 
    13973 #if VMA_RECORDING_ENABLED
    13974  if(allocator->GetRecorder() != VMA_NULL)
    13975  {
    13976  allocator->GetRecorder()->RecordUnmapMemory(
    13977  allocator->GetCurrentFrameIndex(),
    13978  allocation);
    13979  }
    13980 #endif
    13981 
    13982  allocator->Unmap(allocation);
    13983 }
    13984 
    13985 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    13986 {
    13987  VMA_ASSERT(allocator && allocation);
    13988 
    13989  VMA_DEBUG_LOG("vmaFlushAllocation");
    13990 
    13991  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13992 
    13993  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
    13994 
    13995 #if VMA_RECORDING_ENABLED
    13996  if(allocator->GetRecorder() != VMA_NULL)
    13997  {
    13998  allocator->GetRecorder()->RecordFlushAllocation(
    13999  allocator->GetCurrentFrameIndex(),
    14000  allocation, offset, size);
    14001  }
    14002 #endif
    14003 }
    14004 
    14005 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    14006 {
    14007  VMA_ASSERT(allocator && allocation);
    14008 
    14009  VMA_DEBUG_LOG("vmaInvalidateAllocation");
    14010 
    14011  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14012 
    14013  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
    14014 
    14015 #if VMA_RECORDING_ENABLED
    14016  if(allocator->GetRecorder() != VMA_NULL)
    14017  {
    14018  allocator->GetRecorder()->RecordInvalidateAllocation(
    14019  allocator->GetCurrentFrameIndex(),
    14020  allocation, offset, size);
    14021  }
    14022 #endif
    14023 }
    14024 
    14025 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
    14026 {
    14027  VMA_ASSERT(allocator);
    14028 
    14029  VMA_DEBUG_LOG("vmaCheckCorruption");
    14030 
    14031  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14032 
    14033  return allocator->CheckCorruption(memoryTypeBits);
    14034 }
    14035 
    14036 VkResult vmaDefragment(
    14037  VmaAllocator allocator,
    14038  VmaAllocation* pAllocations,
    14039  size_t allocationCount,
    14040  VkBool32* pAllocationsChanged,
    14041  const VmaDefragmentationInfo *pDefragmentationInfo,
    14042  VmaDefragmentationStats* pDefragmentationStats)
    14043 {
    14044  VMA_ASSERT(allocator && pAllocations);
    14045 
    14046  VMA_DEBUG_LOG("vmaDefragment");
    14047 
    14048  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14049 
    14050  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
    14051 }
    14052 
    14053 VkResult vmaBindBufferMemory(
    14054  VmaAllocator allocator,
    14055  VmaAllocation allocation,
    14056  VkBuffer buffer)
    14057 {
    14058  VMA_ASSERT(allocator && allocation && buffer);
    14059 
    14060  VMA_DEBUG_LOG("vmaBindBufferMemory");
    14061 
    14062  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14063 
    14064  return allocator->BindBufferMemory(allocation, buffer);
    14065 }
    14066 
    14067 VkResult vmaBindImageMemory(
    14068  VmaAllocator allocator,
    14069  VmaAllocation allocation,
    14070  VkImage image)
    14071 {
    14072  VMA_ASSERT(allocator && allocation && image);
    14073 
    14074  VMA_DEBUG_LOG("vmaBindImageMemory");
    14075 
    14076  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14077 
    14078  return allocator->BindImageMemory(allocation, image);
    14079 }
    14080 
/*
Creates a VkBuffer together with the memory that backs it.

Sequence: (1) create the buffer, (2) query its memory requirements,
(3) allocate memory through the allocator, (4) bind buffer and memory.
On any failure, everything created so far is unwound (memory freed,
buffer destroyed) and *pBuffer / *pAllocation are reset to
VK_NULL_HANDLE, so the caller never receives a half-constructed pair.

pAllocationInfo is optional; when non-null it is filled only on full success.
*/
VkResult vmaCreateBuffer(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);

    VMA_DEBUG_LOG("vmaCreateBuffer");

    // Serializes public API calls when the global debug mutex is enabled.
    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    // Null the outputs up front so every early-exit path leaves them in a defined state.
    *pBuffer = VK_NULL_HANDLE;
    *pAllocation = VK_NULL_HANDLE;

    // 1. Create VkBuffer.
    VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
        allocator->m_hDevice,
        pBufferCreateInfo,
        allocator->GetAllocationCallbacks(),
        pBuffer);
    if(res >= 0)
    {
        // 2. vkGetBufferMemoryRequirements.
        VkMemoryRequirements vkMemReq = {};
        bool requiresDedicatedAllocation = false;
        bool prefersDedicatedAllocation = false;
        allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
            requiresDedicatedAllocation, prefersDedicatedAllocation);

        // Make sure alignment requirements for specific buffer usages reported
        // in Physical Device Properties are included in alignment reported by memory requirements.
        // (Debug-only sanity checks; VMA_ASSERT compiles away in release builds.)
        if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
        {
            VMA_ASSERT(vkMemReq.alignment %
                allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
        }
        if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
        {
            VMA_ASSERT(vkMemReq.alignment %
                allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
        }
        if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
        {
            VMA_ASSERT(vkMemReq.alignment %
                allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
        }

        // 3. Allocate memory using allocator.
        res = allocator->AllocateMemory(
            vkMemReq,
            requiresDedicatedAllocation,
            prefersDedicatedAllocation,
            *pBuffer, // dedicatedBuffer
            VK_NULL_HANDLE, // dedicatedImage
            *pAllocationCreateInfo,
            VMA_SUBALLOCATION_TYPE_BUFFER,
            pAllocation);

#if VMA_RECORDING_ENABLED
        // Recording happens regardless of the allocation result, mirroring the call as made.
        if(allocator->GetRecorder() != VMA_NULL)
        {
            allocator->GetRecorder()->RecordCreateBuffer(
                allocator->GetCurrentFrameIndex(),
                *pBufferCreateInfo,
                *pAllocationCreateInfo,
                *pAllocation);
        }
#endif

        if(res >= 0)
        {
            // 4. Bind buffer with memory.
            res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
            if(res >= 0)
            {
                // All steps succeeded.
                #if VMA_STATS_STRING_ENABLED
                    (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
                #endif
                if(pAllocationInfo != VMA_NULL)
                {
                    allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
                }

                return VK_SUCCESS;
            }
            // Bind failed: free the memory, then destroy the buffer.
            allocator->FreeMemory(*pAllocation);
            *pAllocation = VK_NULL_HANDLE;
            (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
            *pBuffer = VK_NULL_HANDLE;
            return res;
        }
        // Allocation failed: only the buffer exists, destroy it.
        (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
        *pBuffer = VK_NULL_HANDLE;
        return res;
    }
    // Buffer creation itself failed: nothing to clean up.
    return res;
}
    14182 
/*
Destroys a buffer and frees the allocation that backed it.

Either handle may be VK_NULL_HANDLE independently; each part is skipped
when null, and the call is a no-op when both are null.
*/
void vmaDestroyBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    VmaAllocation allocation)
{
    VMA_ASSERT(allocator);

    // Both handles null: valid no-op, allowed by the API contract.
    if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    {
        return;
    }

    VMA_DEBUG_LOG("vmaDestroyBuffer");

    // Serializes public API calls when the global debug mutex is enabled.
    VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordDestroyBuffer(
            allocator->GetCurrentFrameIndex(),
            allocation);
    }
#endif

    // Destroy the VkBuffer first, then release the memory that backed it.
    if(buffer != VK_NULL_HANDLE)
    {
        (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
    }

    if(allocation != VK_NULL_HANDLE)
    {
        allocator->FreeMemory(allocation);
    }
}
    14218 
/*
Creates a VkImage together with the memory that backs it.

Sequence: (1) create the image, (2) allocate memory through the allocator
based on the image's memory requirements, (3) bind image and memory.
On any failure, everything created so far is unwound (memory freed, image
destroyed) and *pImage / *pAllocation are reset to VK_NULL_HANDLE.

pAllocationInfo is optional; when non-null it is filled only on full success.
*/
VkResult vmaCreateImage(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkImage* pImage,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);

    VMA_DEBUG_LOG("vmaCreateImage");

    // Serializes public API calls when the global debug mutex is enabled.
    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    // Null the outputs up front so every early-exit path leaves them in a defined state.
    *pImage = VK_NULL_HANDLE;
    *pAllocation = VK_NULL_HANDLE;

    // 1. Create VkImage.
    VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
        allocator->m_hDevice,
        pImageCreateInfo,
        allocator->GetAllocationCallbacks(),
        pImage);
    if(res >= 0)
    {
        // Optimal-tiling and linear-tiling images are tracked as distinct
        // suballocation types (they have different granularity rules).
        VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
            VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
            VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;

        // 2. Allocate memory using allocator.
        VkMemoryRequirements vkMemReq = {};
        bool requiresDedicatedAllocation = false;
        bool prefersDedicatedAllocation = false;
        allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
            requiresDedicatedAllocation, prefersDedicatedAllocation);

        res = allocator->AllocateMemory(
            vkMemReq,
            requiresDedicatedAllocation,
            prefersDedicatedAllocation,
            VK_NULL_HANDLE, // dedicatedBuffer
            *pImage, // dedicatedImage
            *pAllocationCreateInfo,
            suballocType,
            pAllocation);

#if VMA_RECORDING_ENABLED
        // Recording happens regardless of the allocation result, mirroring the call as made.
        if(allocator->GetRecorder() != VMA_NULL)
        {
            allocator->GetRecorder()->RecordCreateImage(
                allocator->GetCurrentFrameIndex(),
                *pImageCreateInfo,
                *pAllocationCreateInfo,
                *pAllocation);
        }
#endif

        if(res >= 0)
        {
            // 3. Bind image with memory.
            res = allocator->BindImageMemory(*pAllocation, *pImage);
            if(res >= 0)
            {
                // All steps succeeded.
                #if VMA_STATS_STRING_ENABLED
                    (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
                #endif
                if(pAllocationInfo != VMA_NULL)
                {
                    allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
                }

                return VK_SUCCESS;
            }
            // Bind failed: free the memory, then destroy the image.
            allocator->FreeMemory(*pAllocation);
            *pAllocation = VK_NULL_HANDLE;
            (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
            *pImage = VK_NULL_HANDLE;
            return res;
        }
        // Allocation failed: only the image exists, destroy it.
        (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
        *pImage = VK_NULL_HANDLE;
        return res;
    }
    // Image creation itself failed: nothing to clean up.
    return res;
}
    14305 
/*
Destroys an image and frees the allocation that backed it.

Either handle may be VK_NULL_HANDLE independently; each part is skipped
when null, and the call is a no-op when both are null.
*/
void vmaDestroyImage(
    VmaAllocator allocator,
    VkImage image,
    VmaAllocation allocation)
{
    VMA_ASSERT(allocator);

    // Both handles null: valid no-op, allowed by the API contract.
    if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    {
        return;
    }

    VMA_DEBUG_LOG("vmaDestroyImage");

    // Serializes public API calls when the global debug mutex is enabled.
    VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordDestroyImage(
            allocator->GetCurrentFrameIndex(),
            allocation);
    }
#endif

    // Destroy the VkImage first, then release the memory that backed it.
    if(image != VK_NULL_HANDLE)
    {
        (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
    }
    if(allocation != VK_NULL_HANDLE)
    {
        allocator->FreeMemory(allocation);
    }
}
    14340 
    14341 #endif // #ifdef VMA_IMPLEMENTATION
    PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
    Definition: vk_mem_alloc.h:1567
    +Go to the documentation of this file.
    1 //
    2 // Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
    3 //
    4 // Permission is hereby granted, free of charge, to any person obtaining a copy
    5 // of this software and associated documentation files (the "Software"), to deal
    6 // in the Software without restriction, including without limitation the rights
    7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    8 // copies of the Software, and to permit persons to whom the Software is
    9 // furnished to do so, subject to the following conditions:
    10 //
    11 // The above copyright notice and this permission notice shall be included in
    12 // all copies or substantial portions of the Software.
    13 //
    14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    20 // THE SOFTWARE.
    21 //
    22 
    23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
    24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
    25 
    26 #ifdef __cplusplus
    27 extern "C" {
    28 #endif
    29 
    1460 /*
    1461 Define this macro to 0/1 to disable/enable support for recording functionality,
    1462 available through VmaAllocatorCreateInfo::pRecordSettings.
    1463 */
    1464 #ifndef VMA_RECORDING_ENABLED
    1465  #ifdef _WIN32
    1466  #define VMA_RECORDING_ENABLED 1
    1467  #else
    1468  #define VMA_RECORDING_ENABLED 0
    1469  #endif
    1470 #endif
    1471 
    1472 #ifndef NOMINMAX
    1473  #define NOMINMAX // For windows.h
    1474 #endif
    1475 
    1476 #include <vulkan/vulkan.h>
    1477 
    1478 #if VMA_RECORDING_ENABLED
    1479  #include <windows.h>
    1480 #endif
    1481 
    1482 #if !defined(VMA_DEDICATED_ALLOCATION)
    1483  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
    1484  #define VMA_DEDICATED_ALLOCATION 1
    1485  #else
    1486  #define VMA_DEDICATED_ALLOCATION 0
    1487  #endif
    1488 #endif
    1489 
    1499 VK_DEFINE_HANDLE(VmaAllocator)
    1500 
    1501 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    1503  VmaAllocator allocator,
    1504  uint32_t memoryType,
    1505  VkDeviceMemory memory,
    1506  VkDeviceSize size);
    1508 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    1509  VmaAllocator allocator,
    1510  uint32_t memoryType,
    1511  VkDeviceMemory memory,
    1512  VkDeviceSize size);
    1513 
    1527 
    1557 
    1560 typedef VkFlags VmaAllocatorCreateFlags;
    1561 
    1566 typedef struct VmaVulkanFunctions {
    1567  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    1568  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    1569  PFN_vkAllocateMemory vkAllocateMemory;
    1570  PFN_vkFreeMemory vkFreeMemory;
    1571  PFN_vkMapMemory vkMapMemory;
    1572  PFN_vkUnmapMemory vkUnmapMemory;
    1573  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
    1574  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
    1575  PFN_vkBindBufferMemory vkBindBufferMemory;
    1576  PFN_vkBindImageMemory vkBindImageMemory;
    1577  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    1578  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    1579  PFN_vkCreateBuffer vkCreateBuffer;
    1580  PFN_vkDestroyBuffer vkDestroyBuffer;
    1581  PFN_vkCreateImage vkCreateImage;
    1582  PFN_vkDestroyImage vkDestroyImage;
    1583 #if VMA_DEDICATED_ALLOCATION
    1584  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    1585  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
    1586 #endif
    1588 
    1590 typedef enum VmaRecordFlagBits {
    1597 
    1600 typedef VkFlags VmaRecordFlags;
    1601 
    1603 typedef struct VmaRecordSettings
    1604 {
    1614  const char* pFilePath;
    1616 
    1619 {
    1623 
    1624  VkPhysicalDevice physicalDevice;
    1626 
    1627  VkDevice device;
    1629 
    1632 
    1633  const VkAllocationCallbacks* pAllocationCallbacks;
    1635 
    1674  const VkDeviceSize* pHeapSizeLimit;
    1695 
    1697 VkResult vmaCreateAllocator(
    1698  const VmaAllocatorCreateInfo* pCreateInfo,
    1699  VmaAllocator* pAllocator);
    1700 
    1702 void vmaDestroyAllocator(
    1703  VmaAllocator allocator);
    1704 
    1710  VmaAllocator allocator,
    1711  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
    1712 
    1718  VmaAllocator allocator,
    1719  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
    1720 
    1728  VmaAllocator allocator,
    1729  uint32_t memoryTypeIndex,
    1730  VkMemoryPropertyFlags* pFlags);
    1731 
    1741  VmaAllocator allocator,
    1742  uint32_t frameIndex);
    1743 
    1746 typedef struct VmaStatInfo
    1747 {
    1749  uint32_t blockCount;
    1755  VkDeviceSize usedBytes;
    1757  VkDeviceSize unusedBytes;
    1760 } VmaStatInfo;
    1761 
    1763 typedef struct VmaStats
    1764 {
    1765  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    1766  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    1768 } VmaStats;
    1769 
    1771 void vmaCalculateStats(
    1772  VmaAllocator allocator,
    1773  VmaStats* pStats);
    1774 
    1775 #define VMA_STATS_STRING_ENABLED 1
    1776 
    1777 #if VMA_STATS_STRING_ENABLED
    1778 
    1780 
    1782 void vmaBuildStatsString(
    1783  VmaAllocator allocator,
    1784  char** ppStatsString,
    1785  VkBool32 detailedMap);
    1786 
    1787 void vmaFreeStatsString(
    1788  VmaAllocator allocator,
    1789  char* pStatsString);
    1790 
    1791 #endif // #if VMA_STATS_STRING_ENABLED
    1792 
    1801 VK_DEFINE_HANDLE(VmaPool)
    1802 
    1803 typedef enum VmaMemoryUsage
    1804 {
    1853 } VmaMemoryUsage;
    1854 
    1869 
    1924 
    1937 
    1947 
    1954 
    1958 
    1960 {
    1973  VkMemoryPropertyFlags requiredFlags;
    1978  VkMemoryPropertyFlags preferredFlags;
    1986  uint32_t memoryTypeBits;
    1999  void* pUserData;
    2001 
    2018 VkResult vmaFindMemoryTypeIndex(
    2019  VmaAllocator allocator,
    2020  uint32_t memoryTypeBits,
    2021  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2022  uint32_t* pMemoryTypeIndex);
    2023 
    2037  VmaAllocator allocator,
    2038  const VkBufferCreateInfo* pBufferCreateInfo,
    2039  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2040  uint32_t* pMemoryTypeIndex);
    2041 
    2055  VmaAllocator allocator,
    2056  const VkImageCreateInfo* pImageCreateInfo,
    2057  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2058  uint32_t* pMemoryTypeIndex);
    2059 
    2080 
    2097 
    2108 
    2114 
    2117 typedef VkFlags VmaPoolCreateFlags;
    2118 
    2121 typedef struct VmaPoolCreateInfo {
    2136  VkDeviceSize blockSize;
    2165 
    2168 typedef struct VmaPoolStats {
    2171  VkDeviceSize size;
    2174  VkDeviceSize unusedSize;
    2187  VkDeviceSize unusedRangeSizeMax;
    2190  size_t blockCount;
    2191 } VmaPoolStats;
    2192 
    2199 VkResult vmaCreatePool(
    2200  VmaAllocator allocator,
    2201  const VmaPoolCreateInfo* pCreateInfo,
    2202  VmaPool* pPool);
    2203 
    2206 void vmaDestroyPool(
    2207  VmaAllocator allocator,
    2208  VmaPool pool);
    2209 
    2216 void vmaGetPoolStats(
    2217  VmaAllocator allocator,
    2218  VmaPool pool,
    2219  VmaPoolStats* pPoolStats);
    2220 
    2228  VmaAllocator allocator,
    2229  VmaPool pool,
    2230  size_t* pLostAllocationCount);
    2231 
    2246 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
    2247 
    2272 VK_DEFINE_HANDLE(VmaAllocation)
    2273 
    2274 
    2276 typedef struct VmaAllocationInfo {
    2281  uint32_t memoryType;
    2290  VkDeviceMemory deviceMemory;
    2295  VkDeviceSize offset;
    2300  VkDeviceSize size;
    2314  void* pUserData;
    2316 
    2327 VkResult vmaAllocateMemory(
    2328  VmaAllocator allocator,
    2329  const VkMemoryRequirements* pVkMemoryRequirements,
    2330  const VmaAllocationCreateInfo* pCreateInfo,
    2331  VmaAllocation* pAllocation,
    2332  VmaAllocationInfo* pAllocationInfo);
    2333 
    2357 VkResult vmaAllocateMemoryPages(
    2358  VmaAllocator allocator,
    2359  const VkMemoryRequirements* pVkMemoryRequirements,
    2360  const VmaAllocationCreateInfo* pCreateInfo,
    2361  size_t allocationCount,
    2362  VmaAllocation* pAllocations,
    2363  VmaAllocationInfo* pAllocationInfo);
    2364 
    2372  VmaAllocator allocator,
    2373  VkBuffer buffer,
    2374  const VmaAllocationCreateInfo* pCreateInfo,
    2375  VmaAllocation* pAllocation,
    2376  VmaAllocationInfo* pAllocationInfo);
    2377 
    2379 VkResult vmaAllocateMemoryForImage(
    2380  VmaAllocator allocator,
    2381  VkImage image,
    2382  const VmaAllocationCreateInfo* pCreateInfo,
    2383  VmaAllocation* pAllocation,
    2384  VmaAllocationInfo* pAllocationInfo);
    2385 
    2390 void vmaFreeMemory(
    2391  VmaAllocator allocator,
    2392  VmaAllocation allocation);
    2393 
    2406 void vmaFreeMemoryPages(
    2407  VmaAllocator allocator,
    2408  size_t allocationCount,
    2409  VmaAllocation* pAllocations);
    2410 
    2428  VmaAllocator allocator,
    2429  VmaAllocation allocation,
    2430  VmaAllocationInfo* pAllocationInfo);
    2431 
    2446 VkBool32 vmaTouchAllocation(
    2447  VmaAllocator allocator,
    2448  VmaAllocation allocation);
    2449 
    2464  VmaAllocator allocator,
    2465  VmaAllocation allocation,
    2466  void* pUserData);
    2467 
    2479  VmaAllocator allocator,
    2480  VmaAllocation* pAllocation);
    2481 
    2516 VkResult vmaMapMemory(
    2517  VmaAllocator allocator,
    2518  VmaAllocation allocation,
    2519  void** ppData);
    2520 
    2525 void vmaUnmapMemory(
    2526  VmaAllocator allocator,
    2527  VmaAllocation allocation);
    2528 
    2541 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2542 
    2555 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2556 
    2573 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
    2574 
    2576 typedef struct VmaDefragmentationInfo {
    2581  VkDeviceSize maxBytesToMove;
    2588 
    2590 typedef struct VmaDefragmentationStats {
    2592  VkDeviceSize bytesMoved;
    2594  VkDeviceSize bytesFreed;
    2600 
    2639 VkResult vmaDefragment(
    2640  VmaAllocator allocator,
    2641  VmaAllocation* pAllocations,
    2642  size_t allocationCount,
    2643  VkBool32* pAllocationsChanged,
    2644  const VmaDefragmentationInfo *pDefragmentationInfo,
    2645  VmaDefragmentationStats* pDefragmentationStats);
    2646 
    2659 VkResult vmaBindBufferMemory(
    2660  VmaAllocator allocator,
    2661  VmaAllocation allocation,
    2662  VkBuffer buffer);
    2663 
    2676 VkResult vmaBindImageMemory(
    2677  VmaAllocator allocator,
    2678  VmaAllocation allocation,
    2679  VkImage image);
    2680 
    2707 VkResult vmaCreateBuffer(
    2708  VmaAllocator allocator,
    2709  const VkBufferCreateInfo* pBufferCreateInfo,
    2710  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2711  VkBuffer* pBuffer,
    2712  VmaAllocation* pAllocation,
    2713  VmaAllocationInfo* pAllocationInfo);
    2714 
    2726 void vmaDestroyBuffer(
    2727  VmaAllocator allocator,
    2728  VkBuffer buffer,
    2729  VmaAllocation allocation);
    2730 
    2732 VkResult vmaCreateImage(
    2733  VmaAllocator allocator,
    2734  const VkImageCreateInfo* pImageCreateInfo,
    2735  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2736  VkImage* pImage,
    2737  VmaAllocation* pAllocation,
    2738  VmaAllocationInfo* pAllocationInfo);
    2739 
    2751 void vmaDestroyImage(
    2752  VmaAllocator allocator,
    2753  VkImage image,
    2754  VmaAllocation allocation);
    2755 
    2756 #ifdef __cplusplus
    2757 }
    2758 #endif
    2759 
    2760 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
    2761 
    2762 // For Visual Studio IntelliSense.
    2763 #if defined(__cplusplus) && defined(__INTELLISENSE__)
    2764 #define VMA_IMPLEMENTATION
    2765 #endif
    2766 
    2767 #ifdef VMA_IMPLEMENTATION
    2768 #undef VMA_IMPLEMENTATION
    2769 
    2770 #include <cstdint>
    2771 #include <cstdlib>
    2772 #include <cstring>
    2773 
    2774 /*******************************************************************************
    2775 CONFIGURATION SECTION
    2776 
    2777 Define some of these macros before each #include of this header or change them
here if you need other than the default behavior, depending on your environment.
    2779 */
    2780 
    2781 /*
    2782 Define this macro to 1 to make the library fetch pointers to Vulkan functions
    2783 internally, like:
    2784 
    2785  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    2786 
    2787 Define to 0 if you are going to provide you own pointers to Vulkan functions via
    2788 VmaAllocatorCreateInfo::pVulkanFunctions.
    2789 */
    2790 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
    2791 #define VMA_STATIC_VULKAN_FUNCTIONS 1
    2792 #endif
    2793 
    2794 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
    2795 //#define VMA_USE_STL_CONTAINERS 1
    2796 
    2797 /* Set this macro to 1 to make the library including and using STL containers:
    2798 std::pair, std::vector, std::list, std::unordered_map.
    2799 
    2800 Set it to 0 or undefined to make the library using its own implementation of
    2801 the containers.
    2802 */
    2803 #if VMA_USE_STL_CONTAINERS
    2804  #define VMA_USE_STL_VECTOR 1
    2805  #define VMA_USE_STL_UNORDERED_MAP 1
    2806  #define VMA_USE_STL_LIST 1
    2807 #endif
    2808 
    2809 #if VMA_USE_STL_VECTOR
    2810  #include <vector>
    2811 #endif
    2812 
    2813 #if VMA_USE_STL_UNORDERED_MAP
    2814  #include <unordered_map>
    2815 #endif
    2816 
    2817 #if VMA_USE_STL_LIST
    2818  #include <list>
    2819 #endif
    2820 
    2821 /*
    2822 Following headers are used in this CONFIGURATION section only, so feel free to
    2823 remove them if not needed.
    2824 */
    2825 #include <cassert> // for assert
    2826 #include <algorithm> // for min, max
    2827 #include <mutex> // for std::mutex
    2828 #include <atomic> // for std::atomic
    2829 
    2830 #ifndef VMA_NULL
    2831  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    2832  #define VMA_NULL nullptr
    2833 #endif
    2834 
    2835 #if defined(__APPLE__) || defined(__ANDROID__)
    2836 #include <cstdlib>
    2837 void *aligned_alloc(size_t alignment, size_t size)
    2838 {
    2839  // alignment must be >= sizeof(void*)
    2840  if(alignment < sizeof(void*))
    2841  {
    2842  alignment = sizeof(void*);
    2843  }
    2844 
    2845  void *pointer;
    2846  if(posix_memalign(&pointer, alignment, size) == 0)
    2847  return pointer;
    2848  return VMA_NULL;
    2849 }
    2850 #endif
    2851 
    2852 // If your compiler is not compatible with C++11 and definition of
// aligned_alloc() function is missing, uncommenting the following line may help:
    2854 
    2855 //#include <malloc.h>
    2856 
    2857 // Normal assert to check for programmer's errors, especially in Debug configuration.
    2858 #ifndef VMA_ASSERT
    2859  #ifdef _DEBUG
    2860  #define VMA_ASSERT(expr) assert(expr)
    2861  #else
    2862  #define VMA_ASSERT(expr)
    2863  #endif
    2864 #endif
    2865 
    2866 // Assert that will be called very often, like inside data structures e.g. operator[].
    2867 // Making it non-empty can make program slow.
    2868 #ifndef VMA_HEAVY_ASSERT
    2869  #ifdef _DEBUG
    2870  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    2871  #else
    2872  #define VMA_HEAVY_ASSERT(expr)
    2873  #endif
    2874 #endif
    2875 
    2876 #ifndef VMA_ALIGN_OF
    2877  #define VMA_ALIGN_OF(type) (__alignof(type))
    2878 #endif
    2879 
    2880 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
    2881  #if defined(_WIN32)
    2882  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    2883  #else
    2884  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
    2885  #endif
    2886 #endif
    2887 
    2888 #ifndef VMA_SYSTEM_FREE
    2889  #if defined(_WIN32)
    2890  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    2891  #else
    2892  #define VMA_SYSTEM_FREE(ptr) free(ptr)
    2893  #endif
    2894 #endif
    2895 
    2896 #ifndef VMA_MIN
    2897  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
    2898 #endif
    2899 
    2900 #ifndef VMA_MAX
    2901  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
    2902 #endif
    2903 
    2904 #ifndef VMA_SWAP
    2905  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
    2906 #endif
    2907 
    2908 #ifndef VMA_SORT
    2909  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
    2910 #endif
    2911 
    2912 #ifndef VMA_DEBUG_LOG
    2913  #define VMA_DEBUG_LOG(format, ...)
    2914  /*
    2915  #define VMA_DEBUG_LOG(format, ...) do { \
    2916  printf(format, __VA_ARGS__); \
    2917  printf("\n"); \
    2918  } while(false)
    2919  */
    2920 #endif
    2921 
    2922 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
    2923 #if VMA_STATS_STRING_ENABLED
    2924  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    2925  {
    2926  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    2927  }
    2928  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    2929  {
    2930  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    2931  }
    2932  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    2933  {
    2934  snprintf(outStr, strLen, "%p", ptr);
    2935  }
    2936 #endif
    2937 
    2938 #ifndef VMA_MUTEX
    2939  class VmaMutex
    2940  {
    2941  public:
    2942  VmaMutex() { }
    2943  ~VmaMutex() { }
    2944  void Lock() { m_Mutex.lock(); }
    2945  void Unlock() { m_Mutex.unlock(); }
    2946  private:
    2947  std::mutex m_Mutex;
    2948  };
    2949  #define VMA_MUTEX VmaMutex
    2950 #endif
    2951 
    2952 /*
    2953 If providing your own implementation, you need to implement a subset of std::atomic:
    2954 
    2955 - Constructor(uint32_t desired)
    2956 - uint32_t load() const
    2957 - void store(uint32_t desired)
    2958 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
    2959 */
    2960 #ifndef VMA_ATOMIC_UINT32
    2961  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
    2962 #endif
    2963 
    2964 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    2965 
    2969  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
    2970 #endif
    2971 
    2972 #ifndef VMA_DEBUG_ALIGNMENT
    2973 
    2977  #define VMA_DEBUG_ALIGNMENT (1)
    2978 #endif
    2979 
    2980 #ifndef VMA_DEBUG_MARGIN
    2981 
    2985  #define VMA_DEBUG_MARGIN (0)
    2986 #endif
    2987 
    2988 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
    2989 
    2993  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
    2994 #endif
    2995 
    2996 #ifndef VMA_DEBUG_DETECT_CORRUPTION
    2997 
    3002  #define VMA_DEBUG_DETECT_CORRUPTION (0)
    3003 #endif
    3004 
    3005 #ifndef VMA_DEBUG_GLOBAL_MUTEX
    3006 
    3010  #define VMA_DEBUG_GLOBAL_MUTEX (0)
    3011 #endif
    3012 
    3013 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    3014 
    3018  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
    3019 #endif
    3020 
    3021 #ifndef VMA_SMALL_HEAP_MAX_SIZE
    3022  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
    3024 #endif
    3025 
    3026 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    3027  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
    3029 #endif
    3030 
    3031 #ifndef VMA_CLASS_NO_COPY
    3032  #define VMA_CLASS_NO_COPY(className) \
    3033  private: \
    3034  className(const className&) = delete; \
    3035  className& operator=(const className&) = delete;
    3036 #endif
    3037 
    3038 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
    3039 
    3040 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
    3041 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
    3042 
    3043 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
    3044 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
    3045 
    3046 /*******************************************************************************
    3047 END OF CONFIGURATION
    3048 */
    3049 
// VkAllocationCallbacks with every member (pUserData and all five callback
// pointers) set to null. NOTE(review): presumably substituted where the user
// supplied no allocation callbacks - confirm against the call sites.
static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
    3052 
    3053 // Returns number of bits set to 1 in (v).
    3054 static inline uint32_t VmaCountBitsSet(uint32_t v)
    3055 {
    3056  uint32_t c = v - ((v >> 1) & 0x55555555);
    3057  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    3058  c = ((c >> 4) + c) & 0x0F0F0F0F;
    3059  c = ((c >> 8) + c) & 0x00FF00FF;
    3060  c = ((c >> 16) + c) & 0x0000FFFF;
    3061  return c;
    3062 }
    3063 
    3064 // Aligns given value up to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 16.
    3065 // Use types like uint32_t, uint64_t as T.
    3066 template <typename T>
    3067 static inline T VmaAlignUp(T val, T align)
    3068 {
    3069  return (val + align - 1) / align * align;
    3070 }
    3071 // Aligns given value down to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 8.
    3072 // Use types like uint32_t, uint64_t as T.
    3073 template <typename T>
    3074 static inline T VmaAlignDown(T val, T align)
    3075 {
    3076  return val / align * align;
    3077 }
    3078 
    3079 // Division with mathematical rounding to nearest number.
    3080 template <typename T>
    3081 static inline T VmaRoundDiv(T x, T y)
    3082 {
    3083  return (x + (y / (T)2)) / y;
    3084 }
    3085 
    3086 /*
    3087 Returns true if given number is a power of two.
    3088 T must be unsigned integer number or signed integer but always nonnegative.
    3089 For 0 returns true.
    3090 */
    3091 template <typename T>
    3092 inline bool VmaIsPow2(T x)
    3093 {
    3094  return (x & (x-1)) == 0;
    3095 }
    3096 
    3097 // Returns smallest power of 2 greater or equal to v.
    3098 static inline uint32_t VmaNextPow2(uint32_t v)
    3099 {
    3100  v--;
    3101  v |= v >> 1;
    3102  v |= v >> 2;
    3103  v |= v >> 4;
    3104  v |= v >> 8;
    3105  v |= v >> 16;
    3106  v++;
    3107  return v;
    3108 }
    3109 static inline uint64_t VmaNextPow2(uint64_t v)
    3110 {
    3111  v--;
    3112  v |= v >> 1;
    3113  v |= v >> 2;
    3114  v |= v >> 4;
    3115  v |= v >> 8;
    3116  v |= v >> 16;
    3117  v |= v >> 32;
    3118  v++;
    3119  return v;
    3120 }
    3121 
    3122 // Returns largest power of 2 less or equal to v.
    3123 static inline uint32_t VmaPrevPow2(uint32_t v)
    3124 {
    3125  v |= v >> 1;
    3126  v |= v >> 2;
    3127  v |= v >> 4;
    3128  v |= v >> 8;
    3129  v |= v >> 16;
    3130  v = v ^ (v >> 1);
    3131  return v;
    3132 }
    3133 static inline uint64_t VmaPrevPow2(uint64_t v)
    3134 {
    3135  v |= v >> 1;
    3136  v |= v >> 2;
    3137  v |= v >> 4;
    3138  v |= v >> 8;
    3139  v |= v >> 16;
    3140  v |= v >> 32;
    3141  v = v ^ (v >> 1);
    3142  return v;
    3143 }
    3144 
    3145 static inline bool VmaStrIsEmpty(const char* pStr)
    3146 {
    3147  return pStr == VMA_NULL || *pStr == '\0';
    3148 }
    3149 
    3150 static const char* VmaAlgorithmToStr(uint32_t algorithm)
    3151 {
    3152  switch(algorithm)
    3153  {
    3155  return "Linear";
    3157  return "Buddy";
    3158  case 0:
    3159  return "Default";
    3160  default:
    3161  VMA_ASSERT(0);
    3162  return "";
    3163  }
    3164 }
    3165 
    3166 #ifndef VMA_SORT
    3167 
    3168 template<typename Iterator, typename Compare>
    3169 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
    3170 {
    3171  Iterator centerValue = end; --centerValue;
    3172  Iterator insertIndex = beg;
    3173  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    3174  {
    3175  if(cmp(*memTypeIndex, *centerValue))
    3176  {
    3177  if(insertIndex != memTypeIndex)
    3178  {
    3179  VMA_SWAP(*memTypeIndex, *insertIndex);
    3180  }
    3181  ++insertIndex;
    3182  }
    3183  }
    3184  if(insertIndex != centerValue)
    3185  {
    3186  VMA_SWAP(*insertIndex, *centerValue);
    3187  }
    3188  return insertIndex;
    3189 }
    3190 
    3191 template<typename Iterator, typename Compare>
    3192 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
    3193 {
    3194  if(beg < end)
    3195  {
    3196  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
    3197  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
    3198  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    3199  }
    3200 }
    3201 
    3202 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
    3203 
    3204 #endif // #ifndef VMA_SORT
    3205 
    3206 /*
    3207 Returns true if two memory blocks occupy overlapping pages.
    3208 ResourceA must be in less memory offset than ResourceB.
    3209 
    3210 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
    3211 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
    3212 */
    3213 static inline bool VmaBlocksOnSamePage(
    3214  VkDeviceSize resourceAOffset,
    3215  VkDeviceSize resourceASize,
    3216  VkDeviceSize resourceBOffset,
    3217  VkDeviceSize pageSize)
    3218 {
    3219  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    3220  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    3221  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    3222  VkDeviceSize resourceBStart = resourceBOffset;
    3223  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    3224  return resourceAEndPage == resourceBStartPage;
    3225 }
    3226 
// Kind of resource occupying a suballocation. Used to decide whether two
// neighboring suballocations must respect bufferImageGranularity
// (see VmaIsBufferImageGranularityConflict).
enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0,
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    // Image whose tiling (linear vs. optimal) is not known.
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};
    3237 
    3238 /*
    3239 Returns true if given suballocation types could conflict and must respect
    3240 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
    3241 or linear image and another one is optimal image. If type is unknown, behave
    3242 conservatively.
    3243 */
    3244 static inline bool VmaIsBufferImageGranularityConflict(
    3245  VmaSuballocationType suballocType1,
    3246  VmaSuballocationType suballocType2)
    3247 {
    3248  if(suballocType1 > suballocType2)
    3249  {
    3250  VMA_SWAP(suballocType1, suballocType2);
    3251  }
    3252 
    3253  switch(suballocType1)
    3254  {
    3255  case VMA_SUBALLOCATION_TYPE_FREE:
    3256  return false;
    3257  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
    3258  return true;
    3259  case VMA_SUBALLOCATION_TYPE_BUFFER:
    3260  return
    3261  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    3262  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3263  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
    3264  return
    3265  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    3266  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
    3267  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3268  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
    3269  return
    3270  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3271  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
    3272  return false;
    3273  default:
    3274  VMA_ASSERT(0);
    3275  return true;
    3276  }
    3277 }
    3278 
    3279 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
    3280 {
    3281  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
    3282  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    3283  for(size_t i = 0; i < numberCount; ++i, ++pDst)
    3284  {
    3285  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
    3286  }
    3287 }
    3288 
    3289 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
    3290 {
    3291  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
    3292  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    3293  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
    3294  {
    3295  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
    3296  {
    3297  return false;
    3298  }
    3299  }
    3300  return true;
    3301 }
    3302 
    3303 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
    3304 struct VmaMutexLock
    3305 {
    3306  VMA_CLASS_NO_COPY(VmaMutexLock)
    3307 public:
    3308  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
    3309  m_pMutex(useMutex ? &mutex : VMA_NULL)
    3310  {
    3311  if(m_pMutex)
    3312  {
    3313  m_pMutex->Lock();
    3314  }
    3315  }
    3316 
    3317  ~VmaMutexLock()
    3318  {
    3319  if(m_pMutex)
    3320  {
    3321  m_pMutex->Unlock();
    3322  }
    3323  }
    3324 
    3325 private:
    3326  VMA_MUTEX* m_pMutex;
    3327 };
    3328 
#if VMA_DEBUG_GLOBAL_MUTEX
    // Debug aid: a single global mutex serializing entry points that expand
    // VMA_DEBUG_GLOBAL_MUTEX_LOCK.
    static VMA_MUTEX gDebugGlobalMutex;
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
#else
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
#endif

// Minimum size of a free suballocation to register it in the free suballocation collection.
static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
    3338 
    3339 /*
    3340 Performs binary search and returns iterator to first element that is greater or
    3341 equal to (key), according to comparison (cmp).
    3342 
    3343 Cmp should return true if first argument is less than second argument.
    3344 
    3345 Returned value is the found element, if present in the collection or place where
    3346 new element with value (key) should be inserted.
    3347 */
    3348 template <typename CmpLess, typename IterT, typename KeyT>
    3349 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
    3350 {
    3351  size_t down = 0, up = (end - beg);
    3352  while(down < up)
    3353  {
    3354  const size_t mid = (down + up) / 2;
    3355  if(cmp(*(beg+mid), key))
    3356  {
    3357  down = mid + 1;
    3358  }
    3359  else
    3360  {
    3361  up = mid;
    3362  }
    3363  }
    3364  return beg + down;
    3365 }
    3366 
    3368 // Memory allocation
    3369 
    3370 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
    3371 {
    3372  if((pAllocationCallbacks != VMA_NULL) &&
    3373  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    3374  {
    3375  return (*pAllocationCallbacks->pfnAllocation)(
    3376  pAllocationCallbacks->pUserData,
    3377  size,
    3378  alignment,
    3379  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    3380  }
    3381  else
    3382  {
    3383  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    3384  }
    3385 }
    3386 
    3387 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
    3388 {
    3389  if((pAllocationCallbacks != VMA_NULL) &&
    3390  (pAllocationCallbacks->pfnFree != VMA_NULL))
    3391  {
    3392  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    3393  }
    3394  else
    3395  {
    3396  VMA_SYSTEM_FREE(ptr);
    3397  }
    3398 }
    3399 
// Allocates raw (unconstructed) storage for a single T via the callbacks.
template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}

// Allocates raw (unconstructed) storage for (count) contiguous T objects.
template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}

// Placement-new helpers: allocate via the callbacks, then construct in place.
#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)

#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
    3415 
    3416 template<typename T>
    3417 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
    3418 {
    3419  ptr->~T();
    3420  VmaFree(pAllocationCallbacks, ptr);
    3421 }
    3422 
    3423 template<typename T>
    3424 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
    3425 {
    3426  if(ptr != VMA_NULL)
    3427  {
    3428  for(size_t i = count; i--; )
    3429  {
    3430  ptr[i].~T();
    3431  }
    3432  VmaFree(pAllocationCallbacks, ptr);
    3433  }
    3434 }
    3435 
// STL-compatible allocator. Routes all requests through the given
// VkAllocationCallbacks (VmaMalloc/VmaFree fall back to the system allocator
// when the callbacks are null).
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    // Converting constructor used when containers rebind the allocator.
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    // Two allocators compare equal iff they use the same callback set.
    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};
    3463 
#if VMA_USE_STL_VECTOR

#define VmaVector std::vector

// Index-based insert/remove wrappers so the rest of the code works the same
// whether VmaVector is std::vector or the custom class in the #else branch.
template<typename T, typename allocatorT>
static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(vec.begin() + index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
{
    vec.erase(vec.begin() + index);
}

#else // #if VMA_USE_STL_VECTOR
    3481 
    3482 /* Class with interface compatible with subset of std::vector.
    3483 T must be POD because constructors and destructors are not called and memcpy is
    3484 used for these objects. */
    3485 template<typename T, typename AllocatorT>
    3486 class VmaVector
    3487 {
    3488 public:
    3489  typedef T value_type;
    3490 
    3491  VmaVector(const AllocatorT& allocator) :
    3492  m_Allocator(allocator),
    3493  m_pArray(VMA_NULL),
    3494  m_Count(0),
    3495  m_Capacity(0)
    3496  {
    3497  }
    3498 
    3499  VmaVector(size_t count, const AllocatorT& allocator) :
    3500  m_Allocator(allocator),
    3501  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
    3502  m_Count(count),
    3503  m_Capacity(count)
    3504  {
    3505  }
    3506 
    3507  VmaVector(const VmaVector<T, AllocatorT>& src) :
    3508  m_Allocator(src.m_Allocator),
    3509  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
    3510  m_Count(src.m_Count),
    3511  m_Capacity(src.m_Count)
    3512  {
    3513  if(m_Count != 0)
    3514  {
    3515  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
    3516  }
    3517  }
    3518 
    3519  ~VmaVector()
    3520  {
    3521  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3522  }
    3523 
    3524  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    3525  {
    3526  if(&rhs != this)
    3527  {
    3528  resize(rhs.m_Count);
    3529  if(m_Count != 0)
    3530  {
    3531  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
    3532  }
    3533  }
    3534  return *this;
    3535  }
    3536 
    3537  bool empty() const { return m_Count == 0; }
    3538  size_t size() const { return m_Count; }
    3539  T* data() { return m_pArray; }
    3540  const T* data() const { return m_pArray; }
    3541 
    3542  T& operator[](size_t index)
    3543  {
    3544  VMA_HEAVY_ASSERT(index < m_Count);
    3545  return m_pArray[index];
    3546  }
    3547  const T& operator[](size_t index) const
    3548  {
    3549  VMA_HEAVY_ASSERT(index < m_Count);
    3550  return m_pArray[index];
    3551  }
    3552 
    3553  T& front()
    3554  {
    3555  VMA_HEAVY_ASSERT(m_Count > 0);
    3556  return m_pArray[0];
    3557  }
    3558  const T& front() const
    3559  {
    3560  VMA_HEAVY_ASSERT(m_Count > 0);
    3561  return m_pArray[0];
    3562  }
    3563  T& back()
    3564  {
    3565  VMA_HEAVY_ASSERT(m_Count > 0);
    3566  return m_pArray[m_Count - 1];
    3567  }
    3568  const T& back() const
    3569  {
    3570  VMA_HEAVY_ASSERT(m_Count > 0);
    3571  return m_pArray[m_Count - 1];
    3572  }
    3573 
    3574  void reserve(size_t newCapacity, bool freeMemory = false)
    3575  {
    3576  newCapacity = VMA_MAX(newCapacity, m_Count);
    3577 
    3578  if((newCapacity < m_Capacity) && !freeMemory)
    3579  {
    3580  newCapacity = m_Capacity;
    3581  }
    3582 
    3583  if(newCapacity != m_Capacity)
    3584  {
    3585  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
    3586  if(m_Count != 0)
    3587  {
    3588  memcpy(newArray, m_pArray, m_Count * sizeof(T));
    3589  }
    3590  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3591  m_Capacity = newCapacity;
    3592  m_pArray = newArray;
    3593  }
    3594  }
    3595 
    3596  void resize(size_t newCount, bool freeMemory = false)
    3597  {
    3598  size_t newCapacity = m_Capacity;
    3599  if(newCount > m_Capacity)
    3600  {
    3601  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
    3602  }
    3603  else if(freeMemory)
    3604  {
    3605  newCapacity = newCount;
    3606  }
    3607 
    3608  if(newCapacity != m_Capacity)
    3609  {
    3610  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
    3611  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
    3612  if(elementsToCopy != 0)
    3613  {
    3614  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
    3615  }
    3616  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3617  m_Capacity = newCapacity;
    3618  m_pArray = newArray;
    3619  }
    3620 
    3621  m_Count = newCount;
    3622  }
    3623 
    3624  void clear(bool freeMemory = false)
    3625  {
    3626  resize(0, freeMemory);
    3627  }
    3628 
    3629  void insert(size_t index, const T& src)
    3630  {
    3631  VMA_HEAVY_ASSERT(index <= m_Count);
    3632  const size_t oldCount = size();
    3633  resize(oldCount + 1);
    3634  if(index < oldCount)
    3635  {
    3636  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
    3637  }
    3638  m_pArray[index] = src;
    3639  }
    3640 
    3641  void remove(size_t index)
    3642  {
    3643  VMA_HEAVY_ASSERT(index < m_Count);
    3644  const size_t oldCount = size();
    3645  if(index < oldCount - 1)
    3646  {
    3647  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
    3648  }
    3649  resize(oldCount - 1);
    3650  }
    3651 
    3652  void push_back(const T& src)
    3653  {
    3654  const size_t newIndex = size();
    3655  resize(newIndex + 1);
    3656  m_pArray[newIndex] = src;
    3657  }
    3658 
    3659  void pop_back()
    3660  {
    3661  VMA_HEAVY_ASSERT(m_Count > 0);
    3662  resize(size() - 1);
    3663  }
    3664 
    3665  void push_front(const T& src)
    3666  {
    3667  insert(0, src);
    3668  }
    3669 
    3670  void pop_front()
    3671  {
    3672  VMA_HEAVY_ASSERT(m_Count > 0);
    3673  remove(0);
    3674  }
    3675 
    3676  typedef T* iterator;
    3677 
    3678  iterator begin() { return m_pArray; }
    3679  iterator end() { return m_pArray + m_Count; }
    3680 
    3681 private:
    3682  AllocatorT m_Allocator;
    3683  T* m_pArray;
    3684  size_t m_Count;
    3685  size_t m_Capacity;
    3686 };
    3687 
// Index-based insert/remove wrappers for the custom VmaVector, mirroring the
// std::vector overloads in the #if branch.
template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}

#endif // #if VMA_USE_STL_VECTOR
    3701 
    3702 template<typename CmpLess, typename VectorT>
    3703 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
    3704 {
    3705  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    3706  vector.data(),
    3707  vector.data() + vector.size(),
    3708  value,
    3709  CmpLess()) - vector.data();
    3710  VmaVectorInsert(vector, indexToInsert, value);
    3711  return indexToInsert;
    3712 }
    3713 
    3714 template<typename CmpLess, typename VectorT>
    3715 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
    3716 {
    3717  CmpLess comparator;
    3718  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
    3719  vector.begin(),
    3720  vector.end(),
    3721  value,
    3722  comparator);
    3723  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    3724  {
    3725  size_t indexToRemove = it - vector.begin();
    3726  VmaVectorRemove(vector, indexToRemove);
    3727  return true;
    3728  }
    3729  return false;
    3730 }
    3731 
    3732 template<typename CmpLess, typename IterT, typename KeyT>
    3733 IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
    3734 {
    3735  CmpLess comparator;
    3736  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
    3737  beg, end, value, comparator);
    3738  if(it == end ||
    3739  (!comparator(*it, value) && !comparator(value, *it)))
    3740  {
    3741  return it;
    3742  }
    3743  return end;
    3744 }
    3745 
    3747 // class VmaPoolAllocator
    3748 
/*
Allocator for objects of type T using a list of arrays (pools) to speed up
allocation. Number of elements that can be allocated is not bounded because
allocator can create multiple blocks.
*/
template<typename T>
class VmaPoolAllocator
{
    VMA_CLASS_NO_COPY(VmaPoolAllocator)
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    ~VmaPoolAllocator();
    void Clear();
    T* Alloc();
    void Free(T* ptr);

private:
    // A slot holds either a live T or, while free, the index of the next
    // free slot in the same block - hence the union.
    union Item
    {
        uint32_t NextFreeIndex;
        T Value;
    };

    // One fixed-size array of slots plus the head index of its free list
    // (UINT32_MAX when the block is full).
    struct ItemBlock
    {
        Item* pItems;
        uint32_t FirstFreeIndex;
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    size_t m_ItemsPerBlock;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};
    3784 
// Creates an empty pool allocator; blocks are created lazily on first Alloc.
template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemsPerBlock(itemsPerBlock),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(itemsPerBlock > 0);
}
    3793 
// Releases all blocks (and therefore all items ever allocated from them).
template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    Clear();
}
    3799 
    3800 template<typename T>
    3801 void VmaPoolAllocator<T>::Clear()
    3802 {
    3803  for(size_t i = m_ItemBlocks.size(); i--; )
    3804  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    3805  m_ItemBlocks.clear();
    3806 }
    3807 
    3808 template<typename T>
    3809 T* VmaPoolAllocator<T>::Alloc()
    3810 {
    3811  for(size_t i = m_ItemBlocks.size(); i--; )
    3812  {
    3813  ItemBlock& block = m_ItemBlocks[i];
    3814  // This block has some free items: Use first one.
    3815  if(block.FirstFreeIndex != UINT32_MAX)
    3816  {
    3817  Item* const pItem = &block.pItems[block.FirstFreeIndex];
    3818  block.FirstFreeIndex = pItem->NextFreeIndex;
    3819  return &pItem->Value;
    3820  }
    3821  }
    3822 
    3823  // No block has free item: Create new one and use it.
    3824  ItemBlock& newBlock = CreateNewBlock();
    3825  Item* const pItem = &newBlock.pItems[0];
    3826  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    3827  return &pItem->Value;
    3828 }
    3829 
    3830 template<typename T>
    3831 void VmaPoolAllocator<T>::Free(T* ptr)
    3832 {
    3833  // Search all memory blocks to find ptr.
    3834  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    3835  {
    3836  ItemBlock& block = m_ItemBlocks[i];
    3837 
    3838  // Casting to union.
    3839  Item* pItemPtr;
    3840  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
    3841 
    3842  // Check if pItemPtr is in address range of this block.
    3843  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
    3844  {
    3845  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
    3846  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
    3847  block.FirstFreeIndex = index;
    3848  return;
    3849  }
    3850  }
    3851  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
    3852 }
    3853 
// Allocates a new block of m_ItemsPerBlock items and threads all of its slots
// onto the block's free list. Returns a reference to the stored block.
template<typename T>
typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
{
    // Note: newBlock is copied into m_ItemBlocks, but both copies share the
    // same pItems array, so initializing the free list through the local copy
    // below is visible through the stored block as well.
    ItemBlock newBlock = {
        vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };

    m_ItemBlocks.push_back(newBlock);

    // Setup singly-linked list of all free items in this block.
    for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
        newBlock.pItems[i].NextFreeIndex = i + 1;
    // UINT32_MAX terminates the free list.
    newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
    return m_ItemBlocks.back();
}
    3868 
    3870 // class VmaRawList, VmaList
    3871 
#if VMA_USE_STL_LIST

// Use the standard list directly when the configuration requests it.
#define VmaList std::list

#else // #if VMA_USE_STL_LIST
    3877 
// Node of the doubly linked list used by VmaRawList/VmaList.
template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev; // Null for the front node.
    VmaListItem* pNext; // Null for the back node.
    T Value;
};
    3885 
// Doubly linked list.
// Nodes are drawn from an embedded VmaPoolAllocator rather than allocated
// individually.
template<typename T>
class VmaRawList
{
    VMA_CLASS_NO_COPY(VmaRawList)
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    // Append/prepend a node with uninitialized Value and return it.
    ItemType* PushBack();
    ItemType* PushFront();
    // Append/prepend a node holding a copy of (value).
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator; // Pool supplying list nodes.
    ItemType* m_pFront; // Head of list.
    ItemType* m_pBack;  // Tail of list.
    size_t m_Count;
};
    3930 
// Creates an empty list.
template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128), // 128 list nodes per pool block.
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}
    3940 
template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would be unnecessary
    // computations to return all items to m_ItemAllocator as free.
    // m_ItemAllocator's destructor releases the node storage wholesale.
}
    3947 
    3948 template<typename T>
    3949 void VmaRawList<T>::Clear()
    3950 {
    3951  if(IsEmpty() == false)
    3952  {
    3953  ItemType* pItem = m_pBack;
    3954  while(pItem != VMA_NULL)
    3955  {
    3956  ItemType* const pPrevItem = pItem->pPrev;
    3957  m_ItemAllocator.Free(pItem);
    3958  pItem = pPrevItem;
    3959  }
    3960  m_pFront = VMA_NULL;
    3961  m_pBack = VMA_NULL;
    3962  m_Count = 0;
    3963  }
    3964 }
    3965 
    3966 template<typename T>
    3967 VmaListItem<T>* VmaRawList<T>::PushBack()
    3968 {
    3969  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    3970  pNewItem->pNext = VMA_NULL;
    3971  if(IsEmpty())
    3972  {
    3973  pNewItem->pPrev = VMA_NULL;
    3974  m_pFront = pNewItem;
    3975  m_pBack = pNewItem;
    3976  m_Count = 1;
    3977  }
    3978  else
    3979  {
    3980  pNewItem->pPrev = m_pBack;
    3981  m_pBack->pNext = pNewItem;
    3982  m_pBack = pNewItem;
    3983  ++m_Count;
    3984  }
    3985  return pNewItem;
    3986 }
    3987 
    3988 template<typename T>
    3989 VmaListItem<T>* VmaRawList<T>::PushFront()
    3990 {
    3991  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    3992  pNewItem->pPrev = VMA_NULL;
    3993  if(IsEmpty())
    3994  {
    3995  pNewItem->pNext = VMA_NULL;
    3996  m_pFront = pNewItem;
    3997  m_pBack = pNewItem;
    3998  m_Count = 1;
    3999  }
    4000  else
    4001  {
    4002  pNewItem->pNext = m_pFront;
    4003  m_pFront->pPrev = pNewItem;
    4004  m_pFront = pNewItem;
    4005  ++m_Count;
    4006  }
    4007  return pNewItem;
    4008 }
    4009 
// Appends a node holding a copy of (value) and returns it.
template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
{
    ItemType* const pNewItem = PushBack();
    pNewItem->Value = value;
    return pNewItem;
}
    4017 
// Prepends a node holding a copy of (value) and returns it.
template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
{
    ItemType* const pNewItem = PushFront();
    pNewItem->Value = value;
    return pNewItem;
}
    4025 
    4026 template<typename T>
    4027 void VmaRawList<T>::PopBack()
    4028 {
    4029  VMA_HEAVY_ASSERT(m_Count > 0);
    4030  ItemType* const pBackItem = m_pBack;
    4031  ItemType* const pPrevItem = pBackItem->pPrev;
    4032  if(pPrevItem != VMA_NULL)
    4033  {
    4034  pPrevItem->pNext = VMA_NULL;
    4035  }
    4036  m_pBack = pPrevItem;
    4037  m_ItemAllocator.Free(pBackItem);
    4038  --m_Count;
    4039 }
    4040 
    4041 template<typename T>
    4042 void VmaRawList<T>::PopFront()
    4043 {
    4044  VMA_HEAVY_ASSERT(m_Count > 0);
    4045  ItemType* const pFrontItem = m_pFront;
    4046  ItemType* const pNextItem = pFrontItem->pNext;
    4047  if(pNextItem != VMA_NULL)
    4048  {
    4049  pNextItem->pPrev = VMA_NULL;
    4050  }
    4051  m_pFront = pNextItem;
    4052  m_ItemAllocator.Free(pFrontItem);
    4053  --m_Count;
    4054 }
    4055 
// Unlinks (pItem) from the list and returns its node to the allocator.
// pItem must be a live node of this list.
template<typename T>
void VmaRawList<T>::Remove(ItemType* pItem)
{
    VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    VMA_HEAVY_ASSERT(m_Count > 0);

    // Fix the predecessor's next pointer, or the list head.
    if(pItem->pPrev != VMA_NULL)
    {
        pItem->pPrev->pNext = pItem->pNext;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pFront == pItem);
        m_pFront = pItem->pNext;
    }

    // Fix the successor's prev pointer, or the list tail.
    if(pItem->pNext != VMA_NULL)
    {
        pItem->pNext->pPrev = pItem->pPrev;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pBack == pItem);
        m_pBack = pItem->pPrev;
    }

    m_ItemAllocator.Free(pItem);
    --m_Count;
}
    4085 
// Inserts a new node (uninitialized Value) just before (pItem).
// Null pItem means insert at the end (PushBack).
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const prevItem = pItem->pPrev;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pPrev = prevItem;
        newItem->pNext = pItem;
        pItem->pPrev = newItem;
        if(prevItem != VMA_NULL)
        {
            prevItem->pNext = newItem;
        }
        else
        {
            // pItem was the front - the new node becomes the new front.
            VMA_HEAVY_ASSERT(m_pFront == pItem);
            m_pFront = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushBack();
}
    4111 
    4112 template<typename T>
    4113 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
    4114 {
    4115  if(pItem != VMA_NULL)
    4116  {
    4117  ItemType* const nextItem = pItem->pNext;
    4118  ItemType* const newItem = m_ItemAllocator.Alloc();
    4119  newItem->pNext = nextItem;
    4120  newItem->pPrev = pItem;
    4121  pItem->pNext = newItem;
    4122  if(nextItem != VMA_NULL)
    4123  {
    4124  nextItem->pPrev = newItem;
    4125  }
    4126  else
    4127  {
    4128  VMA_HEAVY_ASSERT(m_pBack == pItem);
    4129  m_pBack = newItem;
    4130  }
    4131  ++m_Count;
    4132  return newItem;
    4133  }
    4134  else
    4135  return PushFront();
    4136 }
    4137 
    4138 template<typename T>
    4139 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
    4140 {
    4141  ItemType* const newItem = InsertBefore(pItem);
    4142  newItem->Value = value;
    4143  return newItem;
    4144 }
    4145 
    4146 template<typename T>
    4147 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
    4148 {
    4149  ItemType* const newItem = InsertAfter(pItem);
    4150  newItem->Value = value;
    4151  return newItem;
    4152 }
    4153 
/*
Doubly linked list mimicking a subset of the std::list interface, built on top
of VmaRawList. AllocatorT is expected to expose an m_pCallbacks member
(VkAllocationCallbacks pointer), as VmaStlAllocator does.
*/
template<typename T, typename AllocatorT>
class VmaList
{
    VMA_CLASS_NO_COPY(VmaList)
public:
    // Mutable bidirectional iterator. A null m_pItem represents end().
    class iterator
    {
    public:
        iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        iterator& operator++()
        {
            // Incrementing end() is not allowed.
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        iterator& operator--()
        {
            // Decrementing end() yields an iterator to the last element.
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        iterator operator++(int)
        {
            iterator result = *this;
            ++*this;
            return result;
        }
        iterator operator--(int)
        {
            iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const iterator& rhs) const
        {
            // Comparing iterators from different lists is a programming error.
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem; // Null means end().

        // Only VmaList itself may construct a positioned iterator.
        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        friend class VmaList<T, AllocatorT>;
    };

    // Read-only bidirectional iterator; implicitly convertible from iterator.
    class const_iterator
    {
    public:
        const_iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        const_iterator(const iterator& src) :
            m_pList(src.m_pList),
            m_pItem(src.m_pItem)
        {
        }

        const T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        const T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        const_iterator& operator++()
        {
            // Incrementing end() is not allowed.
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        const_iterator& operator--()
        {
            // Decrementing end() yields an iterator to the last element.
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        const_iterator operator++(int)
        {
            const_iterator result = *this;
            ++*this;
            return result;
        }
        const_iterator operator--(int)
        {
            const_iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem; // Null means cend().

        friend class VmaList<T, AllocatorT>;
    };

    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    // Invalidates it (the underlying node is freed).
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    // Inserts value before it; returns an iterator to the new element.
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
};
    4338 
    4339 #endif // #if VMA_USE_STL_LIST
    4340 
    4342 // class VmaMap
    4343 
    4344 // Unused in this version.
    4345 #if 0
    4346 
    4347 #if VMA_USE_STL_UNORDERED_MAP
    4348 
    4349 #define VmaPair std::pair
    4350 
    4351 #define VMA_MAP_TYPE(KeyT, ValueT) \
    4352  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
    4353 
    4354 #else // #if VMA_USE_STL_UNORDERED_MAP
    4355 
// Minimal substitute for std::pair, used when VMA_USE_STL_UNORDERED_MAP is off.
template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    // Value-initializes both members.
    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};
    4365 
    4366 /* Class compatible with subset of interface of std::unordered_map.
    4367 KeyT, ValueT must be POD because they will be stored in VmaVector.
    4368 */
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    // Iterators are raw pointers into the underlying vector; they are
    // invalidated by insert() and erase().
    typedef PairType* iterator;

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    // Inserts pair at the position that keeps the vector sorted by key.
    void insert(const PairType& pair);
    // Returns an iterator to the element with the given key, or end() if absent.
    iterator find(const KeyT& key);
    // Removes the element pointed to by it.
    void erase(iterator it);

private:
    // Pairs kept sorted by 'first' (the key) so lookups can use binary search.
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};
    4388 
    4389 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
    4390 
    4391 template<typename FirstT, typename SecondT>
    4392 struct VmaPairFirstLess
    4393 {
    4394  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    4395  {
    4396  return lhs.first < rhs.first;
    4397  }
    4398  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    4399  {
    4400  return lhs.first < rhsFirst;
    4401  }
    4402 };
    4403 
    4404 template<typename KeyT, typename ValueT>
    4405 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
    4406 {
    4407  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    4408  m_Vector.data(),
    4409  m_Vector.data() + m_Vector.size(),
    4410  pair,
    4411  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    4412  VmaVectorInsert(m_Vector, indexToInsert, pair);
    4413 }
    4414 
    4415 template<typename KeyT, typename ValueT>
    4416 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
    4417 {
    4418  PairType* it = VmaBinaryFindFirstNotLess(
    4419  m_Vector.data(),
    4420  m_Vector.data() + m_Vector.size(),
    4421  key,
    4422  VmaPairFirstLess<KeyT, ValueT>());
    4423  if((it != m_Vector.end()) && (it->first == key))
    4424  {
    4425  return it;
    4426  }
    4427  else
    4428  {
    4429  return m_Vector.end();
    4430  }
    4431 }
    4432 
    4433 template<typename KeyT, typename ValueT>
    4434 void VmaMap<KeyT, ValueT>::erase(iterator it)
    4435 {
    4436  VmaVectorRemove(m_Vector, it - m_Vector.begin());
    4437 }
    4438 
    4439 #endif // #if VMA_USE_STL_UNORDERED_MAP
    4440 
    4441 #endif // #if 0
    4442 
    4444 
    4445 class VmaDeviceMemoryBlock;
    4446 
    4447 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
    4448 
/*
Internal representation of a single allocation - the object behind a
VmaAllocation handle. After construction it is in ALLOCATION_TYPE_NONE state
and must be initialized with exactly one of InitBlockAllocation(), InitLost()
or InitDedicatedAllocation(). The anonymous union at the bottom stores the
data of whichever type is active.
*/
struct VmaAllocation_T
{
    VMA_CLASS_NO_COPY(VmaAllocation_T)
private:
    // Top bit of m_MapCount: set when the allocation was created persistently mapped.
    static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;

    enum FLAGS
    {
        // When set, m_pUserData points to a string owned by this allocation
        // (see FreeUserDataString / IsUserDataString).
        FLAG_USER_DATA_STRING = 0x01,
    };

public:
    enum ALLOCATION_TYPE
    {
        ALLOCATION_TYPE_NONE,       // Freshly constructed, not initialized yet.
        ALLOCATION_TYPE_BLOCK,      // Sub-range of a VmaDeviceMemoryBlock.
        ALLOCATION_TYPE_DEDICATED,  // Owns its own private VkDeviceMemory.
    };

    VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
        m_Alignment(1),
        m_Size(0),
        m_pUserData(VMA_NULL),
        m_LastUseFrameIndex(currentFrameIndex),
        m_Type((uint8_t)ALLOCATION_TYPE_NONE),
        m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
        m_MapCount(0),
        m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
    {
#if VMA_STATS_STRING_ENABLED
        m_CreationFrameIndex = currentFrameIndex;
        m_BufferImageUsage = 0;
#endif
    }

    ~VmaAllocation_T()
    {
        // Low 7 bits of m_MapCount are the vmaMapMemory() reference counter.
        VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");

        // Check if owned string was freed.
        VMA_ASSERT(m_pUserData == VMA_NULL);
    }

    // Initializes this object as a sub-allocation inside a memory block.
    // Must be called on an object in ALLOCATION_TYPE_NONE state.
    void InitBlockAllocation(
        VmaPool hPool,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset,
        VkDeviceSize alignment,
        VkDeviceSize size,
        VmaSuballocationType suballocationType,
        bool mapped,
        bool canBecomeLost)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(block != VMA_NULL);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_Alignment = alignment;
        m_Size = size;
        m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_SuballocationType = (uint8_t)suballocationType;
        m_BlockAllocation.m_hPool = hPool;
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
        m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    }

    // Initializes this object as a block allocation that is already lost:
    // no pool, no block, offset 0. Requires m_LastUseFrameIndex to have been
    // set to VMA_FRAME_INDEX_LOST beforehand.
    void InitLost()
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
        m_BlockAllocation.m_Block = VMA_NULL;
        m_BlockAllocation.m_Offset = 0;
        m_BlockAllocation.m_CanBecomeLost = true;
    }

    // Re-targets an existing block allocation to another block/offset.
    // NOTE(review): presumably used during defragmentation - confirm at call sites.
    void ChangeBlockAllocation(
        VmaAllocator hAllocator,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset);

    // pMappedData not null means allocation is created with MAPPED flag.
    void InitDedicatedAllocation(
        uint32_t memoryTypeIndex,
        VkDeviceMemory hMemory,
        VmaSuballocationType suballocationType,
        void* pMappedData,
        VkDeviceSize size)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(hMemory != VK_NULL_HANDLE);
        m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
        m_Alignment = 0;
        m_Size = size;
        m_SuballocationType = (uint8_t)suballocationType;
        m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
        m_DedicatedAllocation.m_hMemory = hMemory;
        m_DedicatedAllocation.m_pMappedData = pMappedData;
    }

    ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    VkDeviceSize GetAlignment() const { return m_Alignment; }
    VkDeviceSize GetSize() const { return m_Size; }
    bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
    void* GetUserData() const { return m_pUserData; }
    void SetUserData(VmaAllocator hAllocator, void* pUserData);
    VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }

    // Valid only for ALLOCATION_TYPE_BLOCK.
    VmaDeviceMemoryBlock* GetBlock() const
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        return m_BlockAllocation.m_Block;
    }
    VkDeviceSize GetOffset() const;
    VkDeviceMemory GetMemory() const;
    uint32_t GetMemoryTypeIndex() const;
    bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
    void* GetMappedData() const;
    bool CanBecomeLost() const;
    VmaPool GetPool() const;

    uint32_t GetLastUseFrameIndex() const
    {
        return m_LastUseFrameIndex.load();
    }
    // Atomic CAS on the last-use frame index; 'expected' is updated on failure,
    // as with std::atomic::compare_exchange_weak.
    bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    {
        return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    }
    /*
    - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
      makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    - Else, returns false.

    If hAllocation is already lost, assert - you should not call it then.
    If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    */
    bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Fills outInfo with statistics describing this single dedicated allocation.
    // Valid only for ALLOCATION_TYPE_DEDICATED.
    void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
        outInfo.blockCount = 1;
        outInfo.allocationCount = 1;
        outInfo.unusedRangeCount = 0;
        outInfo.usedBytes = m_Size;
        outInfo.unusedBytes = 0;
        outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
        // No unused ranges: min stays at the "empty" sentinel, max at 0.
        outInfo.unusedRangeSizeMin = UINT64_MAX;
        outInfo.unusedRangeSizeMax = 0;
    }

    void BlockAllocMap();
    void BlockAllocUnmap();
    VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    void DedicatedAllocUnmap(VmaAllocator hAllocator);

#if VMA_STATS_STRING_ENABLED
    uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
    uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }

    // May be set only once, before any usage was recorded.
    void InitBufferImageUsage(uint32_t bufferImageUsage)
    {
        VMA_ASSERT(m_BufferImageUsage == 0);
        m_BufferImageUsage = bufferImageUsage;
    }

    void PrintParameters(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Alignment;
    VkDeviceSize m_Size;
    void* m_pUserData;
    VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    uint8_t m_Type; // ALLOCATION_TYPE
    uint8_t m_SuballocationType; // VmaSuballocationType
    // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
    uint8_t m_MapCount;
    uint8_t m_Flags; // enum FLAGS

    // Allocation out of VmaDeviceMemoryBlock.
    struct BlockAllocation
    {
        VmaPool m_hPool; // Null if belongs to general memory.
        VmaDeviceMemoryBlock* m_Block;
        VkDeviceSize m_Offset;
        bool m_CanBecomeLost;
    };

    // Allocation for an object that has its own private VkDeviceMemory.
    struct DedicatedAllocation
    {
        uint32_t m_MemoryTypeIndex;
        VkDeviceMemory m_hMemory;
        void* m_pMappedData; // Not null means memory is mapped.
    };

    // Which member is active is determined by m_Type.
    union
    {
        // Allocation out of VmaDeviceMemoryBlock.
        BlockAllocation m_BlockAllocation;
        // Allocation for an object that has its own private VkDeviceMemory.
        DedicatedAllocation m_DedicatedAllocation;
    };

#if VMA_STATS_STRING_ENABLED
    uint32_t m_CreationFrameIndex;
    uint32_t m_BufferImageUsage; // 0 if unknown.
#endif

    void FreeUserDataString(VmaAllocator hAllocator);
};
    4665 
    4666 /*
    4667 Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
    4668 allocated memory block or free.
    4669 */
struct VmaSuballocation
{
    VkDeviceSize offset; // Offset of this region from the start of the block.
    VkDeviceSize size;   // Size of this region in bytes.
    VmaAllocation hAllocation; // NOTE(review): presumably null when type is FREE - confirm.
    VmaSuballocationType type;
};
    4677 
    4678 // Comparator for offsets.
    4679 struct VmaSuballocationOffsetLess
    4680 {
    4681  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    4682  {
    4683  return lhs.offset < rhs.offset;
    4684  }
    4685 };
    4686 struct VmaSuballocationOffsetGreater
    4687 {
    4688  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    4689  {
    4690  return lhs.offset > rhs.offset;
    4691  }
    4692 };
    4693 
    4694 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
    4695 
    4696 // Cost of one additional allocation lost, as equivalent in bytes.
    4697 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
    4698 
    4699 /*
    4700 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
    4701 
    4702 If canMakeOtherLost was false:
    4703 - item points to a FREE suballocation.
    4704 - itemsToMakeLostCount is 0.
    4705 
    4706 If canMakeOtherLost was true:
    4707 - item points to first of sequence of suballocations, which are either FREE,
    4708  or point to VmaAllocations that can become lost.
    4709 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
    4710  the requested allocation to succeed.
    4711 */
struct VmaAllocationRequest
{
    VkDeviceSize offset;
    VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
    VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
    VmaSuballocationList::iterator item;
    size_t itemsToMakeLostCount;
    // NOTE(review): opaque - presumably interpreted by the concrete
    // VmaBlockMetadata implementation that produced this request; confirm.
    void* customData;

    // Total cost of fulfilling this request: bytes of existing allocations
    // that would be lost, plus a fixed per-allocation penalty
    // (VMA_LOST_ALLOCATION_COST). Lower is better.
    VkDeviceSize CalcCost() const
    {
        return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    }
};
    4726 
    4727 /*
    4728 Data structure used for bookkeeping of allocations and unused ranges of memory
    4729 in a single VkDeviceMemory block.
    4730 */
/*
Abstract interface for the bookkeeping of one VkDeviceMemory block.
Concrete strategies seen in this file: VmaBlockMetadata_Generic and
VmaBlockMetadata_Linear.
*/
class VmaBlockMetadata
{
public:
    VmaBlockMetadata(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata() { }
    // Subclasses that override Init() must also record the size (call through
    // or set it themselves).
    virtual void Init(VkDeviceSize size) { m_Size = size; }

    // Validates all data structures inside this object. If not valid, returns false.
    virtual bool Validate() const = 0;
    VkDeviceSize GetSize() const { return m_Size; }
    virtual size_t GetAllocationCount() const = 0;
    virtual VkDeviceSize GetSumFreeSize() const = 0;
    virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
    // Returns true if this block is empty - contains only single free suballocation.
    virtual bool IsEmpty() const = 0;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
    // Shouldn't modify blockCount.
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
#endif

    // Tries to find a place for suballocation with given parameters inside this block.
    // If succeeded, fills pAllocationRequest and returns true.
    // If failed, returns false.
    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy, // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* flags.
        VmaAllocationRequest* pAllocationRequest) = 0;

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest) = 0;

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;

    virtual VkResult CheckCorruption(const void* pBlockData) = 0;

    // Makes actual allocation based on request. Request must already be checked and valid.
    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation) = 0;

    // Frees suballocation assigned to given memory region.
    virtual void Free(const VmaAllocation allocation) = 0;
    virtual void FreeAtOffset(VkDeviceSize offset) = 0;

protected:
    const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }

#if VMA_STATS_STRING_ENABLED
    // Shared helpers for subclasses implementing PrintDetailedMap(): emit the
    // common pieces of the detailed map via the JSON writer.
    void PrintDetailedMap_Begin(class VmaJsonWriter& json,
        VkDeviceSize unusedBytes,
        size_t allocationCount,
        size_t unusedRangeCount) const;
    void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VmaAllocation hAllocation) const;
    void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VkDeviceSize size) const;
    void PrintDetailedMap_End(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Size;
    const VkAllocationCallbacks* m_pAllocationCallbacks;
};
    4812 
// Helper for Validate() implementations: if cond is false, asserts and makes
// the enclosing function return false. The do/while(false) wrapper makes it
// safe to use as a single statement.
#define VMA_VALIDATE(cond) do { if(!(cond)) { \
        VMA_ASSERT(0 && "Validation failed: " #cond); \
        return false; \
    } } while(false)
    4817 
/*
Default metadata implementation: keeps all suballocations (used and free) in a
linked list, plus a secondary vector of free-suballocation iterators sorted by
size to speed up the search for a sufficiently large free region.
*/
class VmaBlockMetadata_Generic : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
public:
    VmaBlockMetadata_Generic(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Generic();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    // Used allocations = all suballocations minus the free ones.
    virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

private:
    // Number of FREE suballocations in m_Suballocations.
    uint32_t m_FreeCount;
    // Total bytes currently free in this block.
    VkDeviceSize m_SumFreeSize;
    VmaSuballocationList m_Suballocations;
    // Suballocations that are free and have size greater than certain threshold.
    // Sorted by size, ascending.
    VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;

    bool ValidateFreeSuballocationList() const;

    // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
    // If yes, fills pOffset and returns true. If no, returns false.
    bool CheckAllocation(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        VmaSuballocationList::const_iterator suballocItem,
        bool canMakeOtherLost,
        VkDeviceSize* pOffset,
        size_t* itemsToMakeLostCount,
        VkDeviceSize* pSumFreeSize,
        VkDeviceSize* pSumItemSize) const;
    // Given free suballocation, it merges it with following one, which must also be free.
    void MergeFreeWithNext(VmaSuballocationList::iterator item);
    // Releases given suballocation, making it free.
    // Merges it with adjacent free suballocations if applicable.
    // Returns iterator to new free suballocation at this place.
    VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    // Given free suballocation, it inserts it into sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    // Given free suballocation, it removes it from sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
};
    4908 
    4909 /*
    4910 Allocations and their references in internal data structure look like this:
    4911 
    4912 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
    4913 
    4914  0 +-------+
    4915  | |
    4916  | |
    4917  | |
    4918  +-------+
    4919  | Alloc | 1st[m_1stNullItemsBeginCount]
    4920  +-------+
    4921  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4922  +-------+
    4923  | ... |
    4924  +-------+
    4925  | Alloc | 1st[1st.size() - 1]
    4926  +-------+
    4927  | |
    4928  | |
    4929  | |
    4930 GetSize() +-------+
    4931 
    4932 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
    4933 
    4934  0 +-------+
    4935  | Alloc | 2nd[0]
    4936  +-------+
    4937  | Alloc | 2nd[1]
    4938  +-------+
    4939  | ... |
    4940  +-------+
    4941  | Alloc | 2nd[2nd.size() - 1]
    4942  +-------+
    4943  | |
    4944  | |
    4945  | |
    4946  +-------+
    4947  | Alloc | 1st[m_1stNullItemsBeginCount]
    4948  +-------+
    4949  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4950  +-------+
    4951  | ... |
    4952  +-------+
    4953  | Alloc | 1st[1st.size() - 1]
    4954  +-------+
    4955  | |
    4956 GetSize() +-------+
    4957 
    4958 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
    4959 
    4960  0 +-------+
    4961  | |
    4962  | |
    4963  | |
    4964  +-------+
    4965  | Alloc | 1st[m_1stNullItemsBeginCount]
    4966  +-------+
    4967  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4968  +-------+
    4969  | ... |
    4970  +-------+
    4971  | Alloc | 1st[1st.size() - 1]
    4972  +-------+
    4973  | |
    4974  | |
    4975  | |
    4976  +-------+
    4977  | Alloc | 2nd[2nd.size() - 1]
    4978  +-------+
    4979  | ... |
    4980  +-------+
    4981  | Alloc | 2nd[1]
    4982  +-------+
    4983  | Alloc | 2nd[0]
    4984 GetSize() +-------+
    4985 
    4986 */
/*
Metadata for the "linear" allocation algorithm: suballocations live in two
vectors used in ping-pong fashion, operating either as a ring buffer or as a
double stack depending on SECOND_VECTOR_MODE (see diagram above).
*/
class VmaBlockMetadata_Linear : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
public:
    VmaBlockMetadata_Linear(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Linear();
    // Must be called once after construction, before any other method.
    virtual void Init(VkDeviceSize size);

    // Validates internal data structures; returns false when inconsistent.
    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const;
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const { return GetAllocationCount() == 0; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    // Tries to find a place for a new allocation; on success fills
    // *pAllocationRequest to be passed to Alloc().
    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    // Commits a request previously produced by CreateAllocationRequest().
    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

private:
    /*
    There are two suballocation vectors, used in ping-pong way.
    The one with index m_1stVectorIndex is called 1st.
    The one with index (m_1stVectorIndex ^ 1) is called 2nd.
    2nd can be non-empty only when 1st is not empty.
    When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
    */
    typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;

    enum SECOND_VECTOR_MODE
    {
        SECOND_VECTOR_EMPTY,
        /*
        Suballocations in 2nd vector are created later than the ones in 1st, but they
        all have smaller offset.
        */
        SECOND_VECTOR_RING_BUFFER,
        /*
        Suballocations in 2nd vector are upper side of double stack.
        They all have offsets higher than those in 1st vector.
        Top of this stack means smaller offsets, but higher indices in this vector.
        */
        SECOND_VECTOR_DOUBLE_STACK,
    };

    VkDeviceSize m_SumFreeSize;
    // The two physical vectors; which one is "1st" is decided by m_1stVectorIndex.
    SuballocationVectorType m_Suballocations0, m_Suballocations1;
    uint32_t m_1stVectorIndex;
    SECOND_VECTOR_MODE m_2ndVectorMode;

    SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
    const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }

    // Number of items in 1st vector with hAllocation = null at the beginning.
    size_t m_1stNullItemsBeginCount;
    // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
    size_t m_1stNullItemsMiddleCount;
    // Number of items in 2nd vector with hAllocation = null.
    size_t m_2ndNullItemsCount;

    bool ShouldCompact1st() const;
    void CleanupAfterFree();
};
    5085 
    5086 /*
    5087 - GetSize() is the original size of allocated memory block.
    5088 - m_UsableSize is this size aligned down to a power of two.
    5089  All allocations and calculations happen relative to m_UsableSize.
    5090 - GetUnusableSize() is the difference between them.
    5091  It is reported as separate, unused range, not available for allocations.
    5092 
    5093 Node at level 0 has size = m_UsableSize.
    5094 Each next level contains nodes with size 2 times smaller than current level.
    5095 m_LevelCount is the maximum number of levels to use in the current object.
    5096 */
/*
Metadata for the "buddy" allocation algorithm: a binary tree of nodes over
m_UsableSize (the block size aligned down to a power of two), with a free list
per level. See the comment above for the size/level relationship.
*/
class VmaBlockMetadata_Buddy : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
public:
    VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Buddy();
    // Must be called once after construction, before any other method.
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const { return m_AllocationCount; }
    // Unusable tail (difference between GetSize() and m_UsableSize) is counted as free.
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    // Empty when the root node is a single free node covering the whole usable size.
    virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Corruption detection is not supported by this algorithm.
    virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
    virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }

private:
    // Nodes are never split below this size, in bytes.
    static const VkDeviceSize MIN_NODE_SIZE = 32;
    static const size_t MAX_LEVELS = 30;

    // Accumulators used while recursively validating the tree.
    struct ValidationContext
    {
        size_t calculatedAllocationCount;
        size_t calculatedFreeCount;
        VkDeviceSize calculatedSumFreeSize;

        ValidationContext() :
            calculatedAllocationCount(0),
            calculatedFreeCount(0),
            calculatedSumFreeSize(0) { }
    };

    // One node of the buddy tree. Payload depends on `type` (see union).
    struct Node
    {
        VkDeviceSize offset;
        enum TYPE
        {
            TYPE_FREE,
            TYPE_ALLOCATION,
            TYPE_SPLIT,
            TYPE_COUNT
        } type;
        Node* parent;
        Node* buddy;

        union
        {
            struct
            {
                // Intrusive links in the per-level free list (valid for TYPE_FREE).
                Node* prev;
                Node* next;
            } free;
            struct
            {
                VmaAllocation alloc;
            } allocation;
            struct
            {
                // Right child is leftChild->buddy (valid for TYPE_SPLIT).
                Node* leftChild;
            } split;
        };
    };

    // Size of the memory block aligned down to a power of two.
    VkDeviceSize m_UsableSize;
    uint32_t m_LevelCount;

    Node* m_Root;
    // Doubly-linked free list of TYPE_FREE nodes, one list per level.
    struct {
        Node* front;
        Node* back;
    } m_FreeList[MAX_LEVELS];
    // Number of nodes in the tree with type == TYPE_ALLOCATION.
    size_t m_AllocationCount;
    // Number of nodes in the tree with type == TYPE_FREE.
    size_t m_FreeCount;
    // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
    VkDeviceSize m_SumFreeSize;

    VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
    void DeleteNode(Node* node);
    bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
    // Smallest level whose node size still fits allocSize.
    uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
    inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
    // Alloc passed just for validation. Can be null.
    void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
    void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
    // Adds node to the front of FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next can be undefined.
    void AddToFreeListFront(uint32_t level, Node* node);
    // Removes node from FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next stay untouched.
    void RemoveFromFreeList(uint32_t level, Node* node);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
#endif
};
    5233 
    5234 /*
    5235 Represents a single block of device memory (`VkDeviceMemory`) with all the
    5236 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
    5237 
    5238 Thread-safety: This class must be externally synchronized.
    5239 */
/*
Represents a single block of device memory (`VkDeviceMemory`) with all the
data about its regions (aka suballocations, #VmaAllocation), assigned and free.

Thread-safety: This class must be externally synchronized.
*/
class VmaDeviceMemoryBlock
{
    VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
public:
    // Suballocation bookkeeping; concrete algorithm chosen in Init().
    VmaBlockMetadata* m_pMetadata;

    VmaDeviceMemoryBlock(VmaAllocator hAllocator);

    ~VmaDeviceMemoryBlock()
    {
        VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
        // Destroy() must have been called first - it releases m_hMemory.
        VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    }

    // Always call after construction.
    void Init(
        VmaAllocator hAllocator,
        uint32_t newMemoryTypeIndex,
        VkDeviceMemory newMemory,
        VkDeviceSize newSize,
        uint32_t id,
        uint32_t algorithm);
    // Always call before destruction.
    void Destroy(VmaAllocator allocator);

    VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    uint32_t GetId() const { return m_Id; }
    void* GetMappedData() const { return m_pMappedData; }

    // Validates all data structures inside this object. If not valid, returns false.
    bool Validate() const;

    VkResult CheckCorruption(VmaAllocator hAllocator);

    // ppData can be null. `count` is the number of references added/removed
    // from the internal map reference counter (m_MapCount).
    VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
    void Unmap(VmaAllocator hAllocator, uint32_t count);

    VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
    VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);

    VkResult BindBufferMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkBuffer hBuffer);
    VkResult BindImageMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkImage hImage);

private:
    uint32_t m_MemoryTypeIndex;
    uint32_t m_Id;
    VkDeviceMemory m_hMemory;

    // Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
    // Also protects m_MapCount, m_pMappedData.
    VMA_MUTEX m_Mutex;
    uint32_t m_MapCount;
    void* m_pMappedData;
};
    5302 
    5303 struct VmaPointerLess
    5304 {
    5305  bool operator()(const void* lhs, const void* rhs) const
    5306  {
    5307  return lhs < rhs;
    5308  }
    5309 };
    5310 
    5311 class VmaDefragmentator;
    5312 
    5313 /*
    5314 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
    5315 Vulkan memory type.
    5316 
    5317 Synchronized internally with a mutex.
    5318 */
/*
Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
Vulkan memory type.

Synchronized internally with a mutex.
*/
struct VmaBlockVector
{
    VMA_CLASS_NO_COPY(VmaBlockVector)
public:
    VmaBlockVector(
        VmaAllocator hAllocator,
        uint32_t memoryTypeIndex,
        VkDeviceSize preferredBlockSize,
        size_t minBlockCount,
        size_t maxBlockCount,
        VkDeviceSize bufferImageGranularity,
        uint32_t frameInUseCount,
        bool isCustomPool,
        bool explicitBlockSize,
        uint32_t algorithm);
    ~VmaBlockVector();

    // Pre-creates m_MinBlockCount empty blocks.
    VkResult CreateMinBlocks();

    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
    uint32_t GetAlgorithm() const { return m_Algorithm; }

    void GetPoolStats(VmaPoolStats* pStats);

    bool IsEmpty() const { return m_Blocks.empty(); }
    bool IsCorruptionDetectionEnabled() const;

    // Allocates `allocationCount` allocations, writing them to pAllocations.
    VkResult Allocate(
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        size_t allocationCount,
        VmaAllocation* pAllocations);

    void Free(
        VmaAllocation hAllocation);

    // Adds statistics of this BlockVector to pStats.
    void AddStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    void MakePoolAllocationsLost(
        uint32_t currentFrameIndex,
        size_t* pLostAllocationCount);
    VkResult CheckCorruption();

    // Lazily creates m_pDefragmentator on first use.
    VmaDefragmentator* EnsureDefragmentator(
        VmaAllocator hAllocator,
        uint32_t currentFrameIndex);

    VkResult Defragment(
        VmaDefragmentationStats* pDefragmentationStats,
        VkDeviceSize& maxBytesToMove,
        uint32_t& maxAllocationsToMove);

    void DestroyDefragmentator();

private:
    friend class VmaDefragmentator;

    const VmaAllocator m_hAllocator;
    const uint32_t m_MemoryTypeIndex;
    const VkDeviceSize m_PreferredBlockSize;
    const size_t m_MinBlockCount;
    const size_t m_MaxBlockCount;
    const VkDeviceSize m_BufferImageGranularity;
    const uint32_t m_FrameInUseCount;
    const bool m_IsCustomPool;
    const bool m_ExplicitBlockSize;
    const uint32_t m_Algorithm;
    /* There can be at most one allocation that is completely empty - a
    hysteresis to avoid pessimistic case of alternating creation and destruction
    of a VkDeviceMemory. */
    bool m_HasEmptyBlock;
    VMA_MUTEX m_Mutex;
    // Incrementally sorted by sumFreeSize, ascending.
    VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
    VmaDefragmentator* m_pDefragmentator;
    // Monotonically increasing id assigned to each newly created block.
    uint32_t m_NextBlockId;

    VkDeviceSize CalcMaxBlockSize() const;

    // Finds and removes given block from vector.
    void Remove(VmaDeviceMemoryBlock* pBlock);

    // Performs single step in sorting m_Blocks. They may not be fully sorted
    // after this call.
    void IncrementallySortBlocks();

    // Allocates a single allocation (one "page" of an Allocate() batch).
    VkResult AllocatePage(
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // To be used only without CAN_MAKE_OTHER_LOST flag.
    VkResult AllocateFromBlock(
        VmaDeviceMemoryBlock* pBlock,
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        VmaAllocationCreateFlags allocFlags,
        void* pUserData,
        VmaSuballocationType suballocType,
        uint32_t strategy,
        VmaAllocation* pAllocation);

    VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
};
    5441 
// Implementation of the public VmaPool handle: a custom pool, which is a thin
// wrapper over a single VmaBlockVector plus a numeric id.
struct VmaPool_T
{
    VMA_CLASS_NO_COPY(VmaPool_T)
public:
    VmaBlockVector m_BlockVector;

    VmaPool_T(
        VmaAllocator hAllocator,
        const VmaPoolCreateInfo& createInfo,
        VkDeviceSize preferredBlockSize);
    ~VmaPool_T();

    uint32_t GetId() const { return m_Id; }
    // Id can be assigned only once, after construction (m_Id starts as 0).
    void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }

#if VMA_STATS_STRING_ENABLED
    //void PrintDetailedMap(class VmaStringBuilder& sb);
#endif

private:
    uint32_t m_Id;
};
    5464 
// Moves allocations between blocks of a single VmaBlockVector to reduce
// fragmentation. Usage: AddAllocation() for each candidate, then Defragment().
class VmaDefragmentator
{
    VMA_CLASS_NO_COPY(VmaDefragmentator)
private:
    const VmaAllocator m_hAllocator;
    VmaBlockVector* const m_pBlockVector;
    uint32_t m_CurrentFrameIndex;
    // Totals accumulated across Defragment() rounds.
    VkDeviceSize m_BytesMoved;
    uint32_t m_AllocationsMoved;

    // A candidate allocation plus an optional out-flag set when it was moved.
    struct AllocationInfo
    {
        VmaAllocation m_hAllocation;
        VkBool32* m_pChanged;

        AllocationInfo() :
            m_hAllocation(VK_NULL_HANDLE),
            m_pChanged(VMA_NULL)
        {
        }
    };

    struct AllocationInfoSizeGreater
    {
        bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
        {
            return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
        }
    };

    // Used between AddAllocation and Defragment.
    VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

    // Per-block view of the candidate allocations that live in that block.
    struct BlockInfo
    {
        VmaDeviceMemoryBlock* m_pBlock;
        bool m_HasNonMovableAllocations;
        VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

        BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
            m_pBlock(VMA_NULL),
            // Pessimistic default; recomputed by CalcHasNonMovableAllocations().
            m_HasNonMovableAllocations(true),
            m_Allocations(pAllocationCallbacks),
            m_pMappedDataForDefragmentation(VMA_NULL)
        {
        }

        // Block has non-movable allocations when it contains more allocations
        // than were registered for defragmentation.
        void CalcHasNonMovableAllocations()
        {
            const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
            const size_t defragmentAllocCount = m_Allocations.size();
            m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
        }

        // NOTE(review): name misspelled ("Descecnding") - renaming would
        // require touching callers outside this declaration.
        void SortAllocationsBySizeDescecnding()
        {
            VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
        }

        VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
        void Unmap(VmaAllocator hAllocator);

    private:
        // Not null if mapped for defragmentation only, not originally mapped.
        void* m_pMappedDataForDefragmentation;
    };

    // Comparators for ordering/searching BlockInfo pointers by block address.
    struct BlockPointerLess
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlock;
        }
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
        }
    };

    // 1. Blocks with some non-movable allocations go first.
    // 2. Blocks with smaller sumFreeSize go first.
    struct BlockInfoCompareMoveDestination
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return true;
            }
            if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return false;
            }
            if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
            {
                return true;
            }
            return false;
        }
    };

    typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
    BlockInfoVector m_Blocks;

    // Performs one pass of moves, bounded by the given budgets.
    VkResult DefragmentRound(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);

    static bool MoveMakesSense(
        size_t dstBlockIndex, VkDeviceSize dstOffset,
        size_t srcBlockIndex, VkDeviceSize srcOffset);

public:
    VmaDefragmentator(
        VmaAllocator hAllocator,
        VmaBlockVector* pBlockVector,
        uint32_t currentFrameIndex);

    ~VmaDefragmentator();

    VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }

    // Registers an allocation as a candidate for moving. pChanged (optional)
    // is set when the allocation actually gets moved.
    void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);

    VkResult Defragment(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);
};
    5594 
    5595 #if VMA_RECORDING_ENABLED
    5596 
// Records VMA API calls (one Record* method per traced entry point) to a file
// for later replay. Only compiled when VMA_RECORDING_ENABLED.
class VmaRecorder
{
public:
    VmaRecorder();
    VkResult Init(const VmaRecordSettings& settings, bool useMutex);
    // Writes header describing the device/memory configuration to the file.
    void WriteConfiguration(
        const VkPhysicalDeviceProperties& devProps,
        const VkPhysicalDeviceMemoryProperties& memProps,
        bool dedicatedAllocationExtensionEnabled);
    ~VmaRecorder();

    void RecordCreateAllocator(uint32_t frameIndex);
    void RecordDestroyAllocator(uint32_t frameIndex);
    void RecordCreatePool(uint32_t frameIndex,
        const VmaPoolCreateInfo& createInfo,
        VmaPool pool);
    void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
    void RecordAllocateMemory(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordAllocateMemoryForImage(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordFreeMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordSetAllocationUserData(uint32_t frameIndex,
        VmaAllocation allocation,
        const void* pUserData);
    void RecordCreateLostAllocation(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordMapMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordUnmapMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordFlushAllocation(uint32_t frameIndex,
        VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    void RecordInvalidateAllocation(uint32_t frameIndex,
        VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    void RecordCreateBuffer(uint32_t frameIndex,
        const VkBufferCreateInfo& bufCreateInfo,
        const VmaAllocationCreateInfo& allocCreateInfo,
        VmaAllocation allocation);
    void RecordCreateImage(uint32_t frameIndex,
        const VkImageCreateInfo& imageCreateInfo,
        const VmaAllocationCreateInfo& allocCreateInfo,
        VmaAllocation allocation);
    void RecordDestroyBuffer(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordDestroyImage(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordTouchAllocation(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordGetAllocationInfo(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordMakePoolAllocationsLost(uint32_t frameIndex,
        VmaPool pool);

private:
    // Thread id and timestamp common to every recorded entry.
    struct CallParams
    {
        uint32_t threadId;
        double time;
    };

    // Printable representation of an allocation's pUserData.
    class UserDataString
    {
    public:
        UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
        const char* GetString() const { return m_Str; }

    private:
        // Buffer for a formatted pointer value (16 hex digits + NUL).
        char m_PtrStr[17];
        const char* m_Str;
    };

    bool m_UseMutex;
    VmaRecordFlags m_Flags;
    FILE* m_File;
    // Protects m_File when m_UseMutex is set.
    VMA_MUTEX m_FileMutex;
    // Performance-counter frequency and start value used to compute CallParams::time.
    int64_t m_Freq;
    int64_t m_StartCounter;

    void GetBasicParams(CallParams& outParams);
    void Flush();
};
    5692 
    5693 #endif // #if VMA_RECORDING_ENABLED
    5694 
    5695 // Main allocator object.
    5696 struct VmaAllocator_T
    5697 {
    5698  VMA_CLASS_NO_COPY(VmaAllocator_T)
    5699 public:
    5700  bool m_UseMutex;
    5701  bool m_UseKhrDedicatedAllocation;
    5702  VkDevice m_hDevice;
    5703  bool m_AllocationCallbacksSpecified;
    5704  VkAllocationCallbacks m_AllocationCallbacks;
    5705  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
    5706 
    5707  // Number of bytes free out of limit, or VK_WHOLE_SIZE if not limit for that heap.
    5708  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
    5709  VMA_MUTEX m_HeapSizeLimitMutex;
    5710 
    5711  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
    5712  VkPhysicalDeviceMemoryProperties m_MemProps;
    5713 
    5714  // Default pools.
    5715  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
    5716 
    5717  // Each vector is sorted by memory (handle value).
    5718  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
    5719  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
    5720  VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
    5721 
    5722  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
    5723  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
    5724  ~VmaAllocator_T();
    5725 
    5726  const VkAllocationCallbacks* GetAllocationCallbacks() const
    5727  {
    5728  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
    5729  }
    5730  const VmaVulkanFunctions& GetVulkanFunctions() const
    5731  {
    5732  return m_VulkanFunctions;
    5733  }
    5734 
    5735  VkDeviceSize GetBufferImageGranularity() const
    5736  {
    5737  return VMA_MAX(
    5738  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
    5739  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
    5740  }
    5741 
    5742  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
    5743  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
    5744 
    5745  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
    5746  {
    5747  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
    5748  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
    5749  }
    5750  // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
    5751  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
    5752  {
    5753  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
    5754  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    5755  }
    5756  // Minimum alignment for all allocations in specific memory type.
    5757  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
    5758  {
    5759  return IsMemoryTypeNonCoherent(memTypeIndex) ?
    5760  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
    5761  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
    5762  }
    5763 
    5764  bool IsIntegratedGpu() const
    5765  {
    5766  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
    5767  }
    5768 
    5769 #if VMA_RECORDING_ENABLED
    5770  VmaRecorder* GetRecorder() const { return m_pRecorder; }
    5771 #endif
    5772 
    5773  void GetBufferMemoryRequirements(
    5774  VkBuffer hBuffer,
    5775  VkMemoryRequirements& memReq,
    5776  bool& requiresDedicatedAllocation,
    5777  bool& prefersDedicatedAllocation) const;
    5778  void GetImageMemoryRequirements(
    5779  VkImage hImage,
    5780  VkMemoryRequirements& memReq,
    5781  bool& requiresDedicatedAllocation,
    5782  bool& prefersDedicatedAllocation) const;
    5783 
    5784  // Main allocation function.
    5785  VkResult AllocateMemory(
    5786  const VkMemoryRequirements& vkMemReq,
    5787  bool requiresDedicatedAllocation,
    5788  bool prefersDedicatedAllocation,
    5789  VkBuffer dedicatedBuffer,
    5790  VkImage dedicatedImage,
    5791  const VmaAllocationCreateInfo& createInfo,
    5792  VmaSuballocationType suballocType,
    5793  size_t allocationCount,
    5794  VmaAllocation* pAllocations);
    5795 
    5796  // Main deallocation function.
    5797  void FreeMemory(
    5798  size_t allocationCount,
    5799  const VmaAllocation* pAllocations);
    5800 
    5801  void CalculateStats(VmaStats* pStats);
    5802 
    5803 #if VMA_STATS_STRING_ENABLED
    5804  void PrintDetailedMap(class VmaJsonWriter& json);
    5805 #endif
    5806 
    5807  VkResult Defragment(
    5808  VmaAllocation* pAllocations,
    5809  size_t allocationCount,
    5810  VkBool32* pAllocationsChanged,
    5811  const VmaDefragmentationInfo* pDefragmentationInfo,
    5812  VmaDefragmentationStats* pDefragmentationStats);
    5813 
    5814  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
    5815  bool TouchAllocation(VmaAllocation hAllocation);
    5816 
    5817  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
    5818  void DestroyPool(VmaPool pool);
    5819  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
    5820 
    5821  void SetCurrentFrameIndex(uint32_t frameIndex);
    5822  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
    5823 
    5824  void MakePoolAllocationsLost(
    5825  VmaPool hPool,
    5826  size_t* pLostAllocationCount);
    5827  VkResult CheckPoolCorruption(VmaPool hPool);
    5828  VkResult CheckCorruption(uint32_t memoryTypeBits);
    5829 
    5830  void CreateLostAllocation(VmaAllocation* pAllocation);
    5831 
    5832  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
    5833  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
    5834 
    5835  VkResult Map(VmaAllocation hAllocation, void** ppData);
    5836  void Unmap(VmaAllocation hAllocation);
    5837 
    5838  VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
    5839  VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);
    5840 
    5841  void FlushOrInvalidateAllocation(
    5842  VmaAllocation hAllocation,
    5843  VkDeviceSize offset, VkDeviceSize size,
    5844  VMA_CACHE_OPERATION op);
    5845 
    5846  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
    5847 
    5848 private:
    5849  VkDeviceSize m_PreferredLargeHeapBlockSize;
    5850 
    5851  VkPhysicalDevice m_PhysicalDevice;
    5852  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
    5853 
    5854  VMA_MUTEX m_PoolsMutex;
    5855  // Protected by m_PoolsMutex. Sorted by pointer value.
    5856  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
    5857  uint32_t m_NextPoolId;
    5858 
    5859  VmaVulkanFunctions m_VulkanFunctions;
    5860 
    5861 #if VMA_RECORDING_ENABLED
    5862  VmaRecorder* m_pRecorder;
    5863 #endif
    5864 
    5865  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
    5866 
    5867  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
    5868 
    5869  VkResult AllocateMemoryOfType(
    5870  VkDeviceSize size,
    5871  VkDeviceSize alignment,
    5872  bool dedicatedAllocation,
    5873  VkBuffer dedicatedBuffer,
    5874  VkImage dedicatedImage,
    5875  const VmaAllocationCreateInfo& createInfo,
    5876  uint32_t memTypeIndex,
    5877  VmaSuballocationType suballocType,
    5878  size_t allocationCount,
    5879  VmaAllocation* pAllocations);
    5880 
    5881  // Helper function only to be used inside AllocateDedicatedMemory.
    5882  VkResult AllocateDedicatedMemoryPage(
    5883  VkDeviceSize size,
    5884  VmaSuballocationType suballocType,
    5885  uint32_t memTypeIndex,
    5886  const VkMemoryAllocateInfo& allocInfo,
    5887  bool map,
    5888  bool isUserDataString,
    5889  void* pUserData,
    5890  VmaAllocation* pAllocation);
    5891 
    5892  // Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
    5893  VkResult AllocateDedicatedMemory(
    5894  VkDeviceSize size,
    5895  VmaSuballocationType suballocType,
    5896  uint32_t memTypeIndex,
    5897  bool map,
    5898  bool isUserDataString,
    5899  void* pUserData,
    5900  VkBuffer dedicatedBuffer,
    5901  VkImage dedicatedImage,
    5902  size_t allocationCount,
    5903  VmaAllocation* pAllocations);
    5904 
    5905  // Tries to free pMemory as Dedicated Memory. Returns true if found and freed.
    5906  void FreeDedicatedMemory(VmaAllocation allocation);
    5907 };
    5908 
    5910 // Memory allocation #2 after VmaAllocator_T definition
    5911 
    5912 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
    5913 {
    5914  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
    5915 }
    5916 
    5917 static void VmaFree(VmaAllocator hAllocator, void* ptr)
    5918 {
    5919  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
    5920 }
    5921 
    5922 template<typename T>
    5923 static T* VmaAllocate(VmaAllocator hAllocator)
    5924 {
    5925  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
    5926 }
    5927 
    5928 template<typename T>
    5929 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
    5930 {
    5931  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
    5932 }
    5933 
    5934 template<typename T>
    5935 static void vma_delete(VmaAllocator hAllocator, T* ptr)
    5936 {
    5937  if(ptr != VMA_NULL)
    5938  {
    5939  ptr->~T();
    5940  VmaFree(hAllocator, ptr);
    5941  }
    5942 }
    5943 
    5944 template<typename T>
    5945 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
    5946 {
    5947  if(ptr != VMA_NULL)
    5948  {
    5949  for(size_t i = count; i--; )
    5950  ptr[i].~T();
    5951  VmaFree(hAllocator, ptr);
    5952  }
    5953 }
    5954 
    5956 // VmaStringBuilder
    5957 
    5958 #if VMA_STATS_STRING_ENABLED
    5959 
    5960 class VmaStringBuilder
    5961 {
    5962 public:
    5963  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    5964  size_t GetLength() const { return m_Data.size(); }
    5965  const char* GetData() const { return m_Data.data(); }
    5966 
    5967  void Add(char ch) { m_Data.push_back(ch); }
    5968  void Add(const char* pStr);
    5969  void AddNewLine() { Add('\n'); }
    5970  void AddNumber(uint32_t num);
    5971  void AddNumber(uint64_t num);
    5972  void AddPointer(const void* ptr);
    5973 
    5974 private:
    5975  VmaVector< char, VmaStlAllocator<char> > m_Data;
    5976 };
    5977 
    5978 void VmaStringBuilder::Add(const char* pStr)
    5979 {
    5980  const size_t strLen = strlen(pStr);
    5981  if(strLen > 0)
    5982  {
    5983  const size_t oldCount = m_Data.size();
    5984  m_Data.resize(oldCount + strLen);
    5985  memcpy(m_Data.data() + oldCount, pStr, strLen);
    5986  }
    5987 }
    5988 
    5989 void VmaStringBuilder::AddNumber(uint32_t num)
    5990 {
    5991  char buf[11];
    5992  VmaUint32ToStr(buf, sizeof(buf), num);
    5993  Add(buf);
    5994 }
    5995 
    5996 void VmaStringBuilder::AddNumber(uint64_t num)
    5997 {
    5998  char buf[21];
    5999  VmaUint64ToStr(buf, sizeof(buf), num);
    6000  Add(buf);
    6001 }
    6002 
    6003 void VmaStringBuilder::AddPointer(const void* ptr)
    6004 {
    6005  char buf[21];
    6006  VmaPtrToStr(buf, sizeof(buf), ptr);
    6007  Add(buf);
    6008 }
    6009 
    6010 #endif // #if VMA_STATS_STRING_ENABLED
    6011 
    6013 // VmaJsonWriter
    6014 
    6015 #if VMA_STATS_STRING_ENABLED
    6016 
    6017 class VmaJsonWriter
    6018 {
    6019  VMA_CLASS_NO_COPY(VmaJsonWriter)
    6020 public:
    6021  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    6022  ~VmaJsonWriter();
    6023 
    6024  void BeginObject(bool singleLine = false);
    6025  void EndObject();
    6026 
    6027  void BeginArray(bool singleLine = false);
    6028  void EndArray();
    6029 
    6030  void WriteString(const char* pStr);
    6031  void BeginString(const char* pStr = VMA_NULL);
    6032  void ContinueString(const char* pStr);
    6033  void ContinueString(uint32_t n);
    6034  void ContinueString(uint64_t n);
    6035  void ContinueString_Pointer(const void* ptr);
    6036  void EndString(const char* pStr = VMA_NULL);
    6037 
    6038  void WriteNumber(uint32_t n);
    6039  void WriteNumber(uint64_t n);
    6040  void WriteBool(bool b);
    6041  void WriteNull();
    6042 
    6043 private:
    6044  static const char* const INDENT;
    6045 
    6046  enum COLLECTION_TYPE
    6047  {
    6048  COLLECTION_TYPE_OBJECT,
    6049  COLLECTION_TYPE_ARRAY,
    6050  };
    6051  struct StackItem
    6052  {
    6053  COLLECTION_TYPE type;
    6054  uint32_t valueCount;
    6055  bool singleLineMode;
    6056  };
    6057 
    6058  VmaStringBuilder& m_SB;
    6059  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    6060  bool m_InsideString;
    6061 
    6062  void BeginValue(bool isString);
    6063  void WriteIndent(bool oneLess = false);
    6064 };
    6065 
    6066 const char* const VmaJsonWriter::INDENT = " ";
    6067 
    6068 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
    6069  m_SB(sb),
    6070  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    6071  m_InsideString(false)
    6072 {
    6073 }
    6074 
    6075 VmaJsonWriter::~VmaJsonWriter()
    6076 {
    6077  VMA_ASSERT(!m_InsideString);
    6078  VMA_ASSERT(m_Stack.empty());
    6079 }
    6080 
    6081 void VmaJsonWriter::BeginObject(bool singleLine)
    6082 {
    6083  VMA_ASSERT(!m_InsideString);
    6084 
    6085  BeginValue(false);
    6086  m_SB.Add('{');
    6087 
    6088  StackItem item;
    6089  item.type = COLLECTION_TYPE_OBJECT;
    6090  item.valueCount = 0;
    6091  item.singleLineMode = singleLine;
    6092  m_Stack.push_back(item);
    6093 }
    6094 
    6095 void VmaJsonWriter::EndObject()
    6096 {
    6097  VMA_ASSERT(!m_InsideString);
    6098 
    6099  WriteIndent(true);
    6100  m_SB.Add('}');
    6101 
    6102  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
    6103  m_Stack.pop_back();
    6104 }
    6105 
    6106 void VmaJsonWriter::BeginArray(bool singleLine)
    6107 {
    6108  VMA_ASSERT(!m_InsideString);
    6109 
    6110  BeginValue(false);
    6111  m_SB.Add('[');
    6112 
    6113  StackItem item;
    6114  item.type = COLLECTION_TYPE_ARRAY;
    6115  item.valueCount = 0;
    6116  item.singleLineMode = singleLine;
    6117  m_Stack.push_back(item);
    6118 }
    6119 
    6120 void VmaJsonWriter::EndArray()
    6121 {
    6122  VMA_ASSERT(!m_InsideString);
    6123 
    6124  WriteIndent(true);
    6125  m_SB.Add(']');
    6126 
    6127  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
    6128  m_Stack.pop_back();
    6129 }
    6130 
    6131 void VmaJsonWriter::WriteString(const char* pStr)
    6132 {
    6133  BeginString(pStr);
    6134  EndString();
    6135 }
    6136 
    6137 void VmaJsonWriter::BeginString(const char* pStr)
    6138 {
    6139  VMA_ASSERT(!m_InsideString);
    6140 
    6141  BeginValue(true);
    6142  m_SB.Add('"');
    6143  m_InsideString = true;
    6144  if(pStr != VMA_NULL && pStr[0] != '\0')
    6145  {
    6146  ContinueString(pStr);
    6147  }
    6148 }
    6149 
    6150 void VmaJsonWriter::ContinueString(const char* pStr)
    6151 {
    6152  VMA_ASSERT(m_InsideString);
    6153 
    6154  const size_t strLen = strlen(pStr);
    6155  for(size_t i = 0; i < strLen; ++i)
    6156  {
    6157  char ch = pStr[i];
    6158  if(ch == '\\')
    6159  {
    6160  m_SB.Add("\\\\");
    6161  }
    6162  else if(ch == '"')
    6163  {
    6164  m_SB.Add("\\\"");
    6165  }
    6166  else if(ch >= 32)
    6167  {
    6168  m_SB.Add(ch);
    6169  }
    6170  else switch(ch)
    6171  {
    6172  case '\b':
    6173  m_SB.Add("\\b");
    6174  break;
    6175  case '\f':
    6176  m_SB.Add("\\f");
    6177  break;
    6178  case '\n':
    6179  m_SB.Add("\\n");
    6180  break;
    6181  case '\r':
    6182  m_SB.Add("\\r");
    6183  break;
    6184  case '\t':
    6185  m_SB.Add("\\t");
    6186  break;
    6187  default:
    6188  VMA_ASSERT(0 && "Character not currently supported.");
    6189  break;
    6190  }
    6191  }
    6192 }
    6193 
    6194 void VmaJsonWriter::ContinueString(uint32_t n)
    6195 {
    6196  VMA_ASSERT(m_InsideString);
    6197  m_SB.AddNumber(n);
    6198 }
    6199 
    6200 void VmaJsonWriter::ContinueString(uint64_t n)
    6201 {
    6202  VMA_ASSERT(m_InsideString);
    6203  m_SB.AddNumber(n);
    6204 }
    6205 
    6206 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
    6207 {
    6208  VMA_ASSERT(m_InsideString);
    6209  m_SB.AddPointer(ptr);
    6210 }
    6211 
    6212 void VmaJsonWriter::EndString(const char* pStr)
    6213 {
    6214  VMA_ASSERT(m_InsideString);
    6215  if(pStr != VMA_NULL && pStr[0] != '\0')
    6216  {
    6217  ContinueString(pStr);
    6218  }
    6219  m_SB.Add('"');
    6220  m_InsideString = false;
    6221 }
    6222 
    6223 void VmaJsonWriter::WriteNumber(uint32_t n)
    6224 {
    6225  VMA_ASSERT(!m_InsideString);
    6226  BeginValue(false);
    6227  m_SB.AddNumber(n);
    6228 }
    6229 
    6230 void VmaJsonWriter::WriteNumber(uint64_t n)
    6231 {
    6232  VMA_ASSERT(!m_InsideString);
    6233  BeginValue(false);
    6234  m_SB.AddNumber(n);
    6235 }
    6236 
    6237 void VmaJsonWriter::WriteBool(bool b)
    6238 {
    6239  VMA_ASSERT(!m_InsideString);
    6240  BeginValue(false);
    6241  m_SB.Add(b ? "true" : "false");
    6242 }
    6243 
    6244 void VmaJsonWriter::WriteNull()
    6245 {
    6246  VMA_ASSERT(!m_InsideString);
    6247  BeginValue(false);
    6248  m_SB.Add("null");
    6249 }
    6250 
    6251 void VmaJsonWriter::BeginValue(bool isString)
    6252 {
    6253  if(!m_Stack.empty())
    6254  {
    6255  StackItem& currItem = m_Stack.back();
    6256  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    6257  currItem.valueCount % 2 == 0)
    6258  {
    6259  VMA_ASSERT(isString);
    6260  }
    6261 
    6262  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    6263  currItem.valueCount % 2 != 0)
    6264  {
    6265  m_SB.Add(": ");
    6266  }
    6267  else if(currItem.valueCount > 0)
    6268  {
    6269  m_SB.Add(", ");
    6270  WriteIndent();
    6271  }
    6272  else
    6273  {
    6274  WriteIndent();
    6275  }
    6276  ++currItem.valueCount;
    6277  }
    6278 }
    6279 
    6280 void VmaJsonWriter::WriteIndent(bool oneLess)
    6281 {
    6282  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
    6283  {
    6284  m_SB.AddNewLine();
    6285 
    6286  size_t count = m_Stack.size();
    6287  if(count > 0 && oneLess)
    6288  {
    6289  --count;
    6290  }
    6291  for(size_t i = 0; i < count; ++i)
    6292  {
    6293  m_SB.Add(INDENT);
    6294  }
    6295  }
    6296 }
    6297 
    6298 #endif // #if VMA_STATS_STRING_ENABLED
    6299 
    6301 
    6302 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
    6303 {
    6304  if(IsUserDataString())
    6305  {
    6306  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
    6307 
    6308  FreeUserDataString(hAllocator);
    6309 
    6310  if(pUserData != VMA_NULL)
    6311  {
    6312  const char* const newStrSrc = (char*)pUserData;
    6313  const size_t newStrLen = strlen(newStrSrc);
    6314  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
    6315  memcpy(newStrDst, newStrSrc, newStrLen + 1);
    6316  m_pUserData = newStrDst;
    6317  }
    6318  }
    6319  else
    6320  {
    6321  m_pUserData = pUserData;
    6322  }
    6323 }
    6324 
    6325 void VmaAllocation_T::ChangeBlockAllocation(
    6326  VmaAllocator hAllocator,
    6327  VmaDeviceMemoryBlock* block,
    6328  VkDeviceSize offset)
    6329 {
    6330  VMA_ASSERT(block != VMA_NULL);
    6331  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    6332 
    6333  // Move mapping reference counter from old block to new block.
    6334  if(block != m_BlockAllocation.m_Block)
    6335  {
    6336  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
    6337  if(IsPersistentMap())
    6338  ++mapRefCount;
    6339  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
    6340  block->Map(hAllocator, mapRefCount, VMA_NULL);
    6341  }
    6342 
    6343  m_BlockAllocation.m_Block = block;
    6344  m_BlockAllocation.m_Offset = offset;
    6345 }
    6346 
    6347 VkDeviceSize VmaAllocation_T::GetOffset() const
    6348 {
    6349  switch(m_Type)
    6350  {
    6351  case ALLOCATION_TYPE_BLOCK:
    6352  return m_BlockAllocation.m_Offset;
    6353  case ALLOCATION_TYPE_DEDICATED:
    6354  return 0;
    6355  default:
    6356  VMA_ASSERT(0);
    6357  return 0;
    6358  }
    6359 }
    6360 
    6361 VkDeviceMemory VmaAllocation_T::GetMemory() const
    6362 {
    6363  switch(m_Type)
    6364  {
    6365  case ALLOCATION_TYPE_BLOCK:
    6366  return m_BlockAllocation.m_Block->GetDeviceMemory();
    6367  case ALLOCATION_TYPE_DEDICATED:
    6368  return m_DedicatedAllocation.m_hMemory;
    6369  default:
    6370  VMA_ASSERT(0);
    6371  return VK_NULL_HANDLE;
    6372  }
    6373 }
    6374 
    6375 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
    6376 {
    6377  switch(m_Type)
    6378  {
    6379  case ALLOCATION_TYPE_BLOCK:
    6380  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
    6381  case ALLOCATION_TYPE_DEDICATED:
    6382  return m_DedicatedAllocation.m_MemoryTypeIndex;
    6383  default:
    6384  VMA_ASSERT(0);
    6385  return UINT32_MAX;
    6386  }
    6387 }
    6388 
    6389 void* VmaAllocation_T::GetMappedData() const
    6390 {
    6391  switch(m_Type)
    6392  {
    6393  case ALLOCATION_TYPE_BLOCK:
    6394  if(m_MapCount != 0)
    6395  {
    6396  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
    6397  VMA_ASSERT(pBlockData != VMA_NULL);
    6398  return (char*)pBlockData + m_BlockAllocation.m_Offset;
    6399  }
    6400  else
    6401  {
    6402  return VMA_NULL;
    6403  }
    6404  break;
    6405  case ALLOCATION_TYPE_DEDICATED:
    6406  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
    6407  return m_DedicatedAllocation.m_pMappedData;
    6408  default:
    6409  VMA_ASSERT(0);
    6410  return VMA_NULL;
    6411  }
    6412 }
    6413 
    6414 bool VmaAllocation_T::CanBecomeLost() const
    6415 {
    6416  switch(m_Type)
    6417  {
    6418  case ALLOCATION_TYPE_BLOCK:
    6419  return m_BlockAllocation.m_CanBecomeLost;
    6420  case ALLOCATION_TYPE_DEDICATED:
    6421  return false;
    6422  default:
    6423  VMA_ASSERT(0);
    6424  return false;
    6425  }
    6426 }
    6427 
    6428 VmaPool VmaAllocation_T::GetPool() const
    6429 {
    6430  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    6431  return m_BlockAllocation.m_hPool;
    6432 }
    6433 
    6434 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    6435 {
    6436  VMA_ASSERT(CanBecomeLost());
    6437 
    6438  /*
    6439  Warning: This is a carefully designed algorithm.
    6440  Do not modify unless you really know what you're doing :)
    6441  */
    6442  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
    6443  for(;;)
    6444  {
    6445  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
    6446  {
    6447  VMA_ASSERT(0);
    6448  return false;
    6449  }
    6450  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
    6451  {
    6452  return false;
    6453  }
    6454  else // Last use time earlier than current time.
    6455  {
    6456  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
    6457  {
    6458  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
    6459  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
    6460  return true;
    6461  }
    6462  }
    6463  }
    6464 }
    6465 
    6466 #if VMA_STATS_STRING_ENABLED
    6467 
    6468 // Correspond to values of enum VmaSuballocationType.
    6469 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    6470  "FREE",
    6471  "UNKNOWN",
    6472  "BUFFER",
    6473  "IMAGE_UNKNOWN",
    6474  "IMAGE_LINEAR",
    6475  "IMAGE_OPTIMAL",
    6476 };
    6477 
    6478 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
    6479 {
    6480  json.WriteString("Type");
    6481  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
    6482 
    6483  json.WriteString("Size");
    6484  json.WriteNumber(m_Size);
    6485 
    6486  if(m_pUserData != VMA_NULL)
    6487  {
    6488  json.WriteString("UserData");
    6489  if(IsUserDataString())
    6490  {
    6491  json.WriteString((const char*)m_pUserData);
    6492  }
    6493  else
    6494  {
    6495  json.BeginString();
    6496  json.ContinueString_Pointer(m_pUserData);
    6497  json.EndString();
    6498  }
    6499  }
    6500 
    6501  json.WriteString("CreationFrameIndex");
    6502  json.WriteNumber(m_CreationFrameIndex);
    6503 
    6504  json.WriteString("LastUseFrameIndex");
    6505  json.WriteNumber(GetLastUseFrameIndex());
    6506 
    6507  if(m_BufferImageUsage != 0)
    6508  {
    6509  json.WriteString("Usage");
    6510  json.WriteNumber(m_BufferImageUsage);
    6511  }
    6512 }
    6513 
    6514 #endif
    6515 
    6516 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
    6517 {
    6518  VMA_ASSERT(IsUserDataString());
    6519  if(m_pUserData != VMA_NULL)
    6520  {
    6521  char* const oldStr = (char*)m_pUserData;
    6522  const size_t oldStrLen = strlen(oldStr);
    6523  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
    6524  m_pUserData = VMA_NULL;
    6525  }
    6526 }
    6527 
    6528 void VmaAllocation_T::BlockAllocMap()
    6529 {
    6530  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    6531 
    6532  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    6533  {
    6534  ++m_MapCount;
    6535  }
    6536  else
    6537  {
    6538  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
    6539  }
    6540 }
    6541 
    6542 void VmaAllocation_T::BlockAllocUnmap()
    6543 {
    6544  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    6545 
    6546  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    6547  {
    6548  --m_MapCount;
    6549  }
    6550  else
    6551  {
    6552  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
    6553  }
    6554 }
    6555 
    6556 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
    6557 {
    6558  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
    6559 
    6560  if(m_MapCount != 0)
    6561  {
    6562  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    6563  {
    6564  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
    6565  *ppData = m_DedicatedAllocation.m_pMappedData;
    6566  ++m_MapCount;
    6567  return VK_SUCCESS;
    6568  }
    6569  else
    6570  {
    6571  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
    6572  return VK_ERROR_MEMORY_MAP_FAILED;
    6573  }
    6574  }
    6575  else
    6576  {
    6577  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
    6578  hAllocator->m_hDevice,
    6579  m_DedicatedAllocation.m_hMemory,
    6580  0, // offset
    6581  VK_WHOLE_SIZE,
    6582  0, // flags
    6583  ppData);
    6584  if(result == VK_SUCCESS)
    6585  {
    6586  m_DedicatedAllocation.m_pMappedData = *ppData;
    6587  m_MapCount = 1;
    6588  }
    6589  return result;
    6590  }
    6591 }
    6592 
    6593 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
    6594 {
    6595  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
    6596 
    6597  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    6598  {
    6599  --m_MapCount;
    6600  if(m_MapCount == 0)
    6601  {
    6602  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
    6603  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
    6604  hAllocator->m_hDevice,
    6605  m_DedicatedAllocation.m_hMemory);
    6606  }
    6607  }
    6608  else
    6609  {
    6610  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
    6611  }
    6612 }
    6613 
    6614 #if VMA_STATS_STRING_ENABLED
    6615 
    6616 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
    6617 {
    6618  json.BeginObject();
    6619 
    6620  json.WriteString("Blocks");
    6621  json.WriteNumber(stat.blockCount);
    6622 
    6623  json.WriteString("Allocations");
    6624  json.WriteNumber(stat.allocationCount);
    6625 
    6626  json.WriteString("UnusedRanges");
    6627  json.WriteNumber(stat.unusedRangeCount);
    6628 
    6629  json.WriteString("UsedBytes");
    6630  json.WriteNumber(stat.usedBytes);
    6631 
    6632  json.WriteString("UnusedBytes");
    6633  json.WriteNumber(stat.unusedBytes);
    6634 
    6635  if(stat.allocationCount > 1)
    6636  {
    6637  json.WriteString("AllocationSize");
    6638  json.BeginObject(true);
    6639  json.WriteString("Min");
    6640  json.WriteNumber(stat.allocationSizeMin);
    6641  json.WriteString("Avg");
    6642  json.WriteNumber(stat.allocationSizeAvg);
    6643  json.WriteString("Max");
    6644  json.WriteNumber(stat.allocationSizeMax);
    6645  json.EndObject();
    6646  }
    6647 
    6648  if(stat.unusedRangeCount > 1)
    6649  {
    6650  json.WriteString("UnusedRangeSize");
    6651  json.BeginObject(true);
    6652  json.WriteString("Min");
    6653  json.WriteNumber(stat.unusedRangeSizeMin);
    6654  json.WriteString("Avg");
    6655  json.WriteNumber(stat.unusedRangeSizeAvg);
    6656  json.WriteString("Max");
    6657  json.WriteNumber(stat.unusedRangeSizeMax);
    6658  json.EndObject();
    6659  }
    6660 
    6661  json.EndObject();
    6662 }
    6663 
    6664 #endif // #if VMA_STATS_STRING_ENABLED
    6665 
    6666 struct VmaSuballocationItemSizeLess
    6667 {
    6668  bool operator()(
    6669  const VmaSuballocationList::iterator lhs,
    6670  const VmaSuballocationList::iterator rhs) const
    6671  {
    6672  return lhs->size < rhs->size;
    6673  }
    6674  bool operator()(
    6675  const VmaSuballocationList::iterator lhs,
    6676  VkDeviceSize rhsSize) const
    6677  {
    6678  return lhs->size < rhsSize;
    6679  }
    6680 };
    6681 
    6682 
    6684 // class VmaBlockMetadata
    6685 
    6686 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
    6687  m_Size(0),
    6688  m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
    6689 {
    6690 }
    6691 
    6692 #if VMA_STATS_STRING_ENABLED
    6693 
    6694 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
    6695  VkDeviceSize unusedBytes,
    6696  size_t allocationCount,
    6697  size_t unusedRangeCount) const
    6698 {
    6699  json.BeginObject();
    6700 
    6701  json.WriteString("TotalBytes");
    6702  json.WriteNumber(GetSize());
    6703 
    6704  json.WriteString("UnusedBytes");
    6705  json.WriteNumber(unusedBytes);
    6706 
    6707  json.WriteString("Allocations");
    6708  json.WriteNumber((uint64_t)allocationCount);
    6709 
    6710  json.WriteString("UnusedRanges");
    6711  json.WriteNumber((uint64_t)unusedRangeCount);
    6712 
    6713  json.WriteString("Suballocations");
    6714  json.BeginArray();
    6715 }
    6716 
    6717 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
    6718  VkDeviceSize offset,
    6719  VmaAllocation hAllocation) const
    6720 {
    6721  json.BeginObject(true);
    6722 
    6723  json.WriteString("Offset");
    6724  json.WriteNumber(offset);
    6725 
    6726  hAllocation->PrintParameters(json);
    6727 
    6728  json.EndObject();
    6729 }
    6730 
    6731 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
    6732  VkDeviceSize offset,
    6733  VkDeviceSize size) const
    6734 {
    6735  json.BeginObject(true);
    6736 
    6737  json.WriteString("Offset");
    6738  json.WriteNumber(offset);
    6739 
    6740  json.WriteString("Type");
    6741  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
    6742 
    6743  json.WriteString("Size");
    6744  json.WriteNumber(size);
    6745 
    6746  json.EndObject();
    6747 }
    6748 
    6749 void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
    6750 {
    6751  json.EndArray();
    6752  json.EndObject();
    6753 }
    6754 
    6755 #endif // #if VMA_STATS_STRING_ENABLED
    6756 
    6758 // class VmaBlockMetadata_Generic
    6759 
    6760 VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
    6761  VmaBlockMetadata(hAllocator),
    6762  m_FreeCount(0),
    6763  m_SumFreeSize(0),
    6764  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    6765  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
    6766 {
    6767 }
    6768 
    6769 VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
    6770 {
    6771 }
    6772 
    6773 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
    6774 {
    6775  VmaBlockMetadata::Init(size);
    6776 
    6777  m_FreeCount = 1;
    6778  m_SumFreeSize = size;
    6779 
    6780  VmaSuballocation suballoc = {};
    6781  suballoc.offset = 0;
    6782  suballoc.size = size;
    6783  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    6784  suballoc.hAllocation = VK_NULL_HANDLE;
    6785 
    6786  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    6787  m_Suballocations.push_back(suballoc);
    6788  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
    6789  --suballocItem;
    6790  m_FreeSuballocationsBySize.push_back(suballocItem);
    6791 }
    6792 
    6793 bool VmaBlockMetadata_Generic::Validate() const
    6794 {
    6795  VMA_VALIDATE(!m_Suballocations.empty());
    6796 
    6797  // Expected offset of new suballocation as calculated from previous ones.
    6798  VkDeviceSize calculatedOffset = 0;
    6799  // Expected number of free suballocations as calculated from traversing their list.
    6800  uint32_t calculatedFreeCount = 0;
    6801  // Expected sum size of free suballocations as calculated from traversing their list.
    6802  VkDeviceSize calculatedSumFreeSize = 0;
    6803  // Expected number of free suballocations that should be registered in
    6804  // m_FreeSuballocationsBySize calculated from traversing their list.
    6805  size_t freeSuballocationsToRegister = 0;
    6806  // True if previous visited suballocation was free.
    6807  bool prevFree = false;
    6808 
    6809  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    6810  suballocItem != m_Suballocations.cend();
    6811  ++suballocItem)
    6812  {
    6813  const VmaSuballocation& subAlloc = *suballocItem;
    6814 
    6815  // Actual offset of this suballocation doesn't match expected one.
    6816  VMA_VALIDATE(subAlloc.offset == calculatedOffset);
    6817 
    6818  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
    6819  // Two adjacent free suballocations are invalid. They should be merged.
    6820  VMA_VALIDATE(!prevFree || !currFree);
    6821 
    6822  VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
    6823 
    6824  if(currFree)
    6825  {
    6826  calculatedSumFreeSize += subAlloc.size;
    6827  ++calculatedFreeCount;
    6828  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    6829  {
    6830  ++freeSuballocationsToRegister;
    6831  }
    6832 
    6833  // Margin required between allocations - every free space must be at least that large.
    6834  VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
    6835  }
    6836  else
    6837  {
    6838  VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
    6839  VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
    6840 
    6841  // Margin required between allocations - previous allocation must be free.
    6842  VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
    6843  }
    6844 
    6845  calculatedOffset += subAlloc.size;
    6846  prevFree = currFree;
    6847  }
    6848 
    6849  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    6850  // match expected one.
    6851  VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
    6852 
    6853  VkDeviceSize lastSize = 0;
    6854  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    6855  {
    6856  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
    6857 
    6858  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
    6859  VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
    6860  // They must be sorted by size ascending.
    6861  VMA_VALIDATE(suballocItem->size >= lastSize);
    6862 
    6863  lastSize = suballocItem->size;
    6864  }
    6865 
    6866  // Check if totals match calculacted values.
    6867  VMA_VALIDATE(ValidateFreeSuballocationList());
    6868  VMA_VALIDATE(calculatedOffset == GetSize());
    6869  VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
    6870  VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
    6871 
    6872  return true;
    6873 }
    6874 
    6875 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
    6876 {
    6877  if(!m_FreeSuballocationsBySize.empty())
    6878  {
    6879  return m_FreeSuballocationsBySize.back()->size;
    6880  }
    6881  else
    6882  {
    6883  return 0;
    6884  }
    6885 }
    6886 
    6887 bool VmaBlockMetadata_Generic::IsEmpty() const
    6888 {
    6889  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
    6890 }
    6891 
    6892 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    6893 {
    6894  outInfo.blockCount = 1;
    6895 
    6896  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    6897  outInfo.allocationCount = rangeCount - m_FreeCount;
    6898  outInfo.unusedRangeCount = m_FreeCount;
    6899 
    6900  outInfo.unusedBytes = m_SumFreeSize;
    6901  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
    6902 
    6903  outInfo.allocationSizeMin = UINT64_MAX;
    6904  outInfo.allocationSizeMax = 0;
    6905  outInfo.unusedRangeSizeMin = UINT64_MAX;
    6906  outInfo.unusedRangeSizeMax = 0;
    6907 
    6908  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    6909  suballocItem != m_Suballocations.cend();
    6910  ++suballocItem)
    6911  {
    6912  const VmaSuballocation& suballoc = *suballocItem;
    6913  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    6914  {
    6915  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    6916  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
    6917  }
    6918  else
    6919  {
    6920  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
    6921  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
    6922  }
    6923  }
    6924 }
    6925 
    6926 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
    6927 {
    6928  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    6929 
    6930  inoutStats.size += GetSize();
    6931  inoutStats.unusedSize += m_SumFreeSize;
    6932  inoutStats.allocationCount += rangeCount - m_FreeCount;
    6933  inoutStats.unusedRangeCount += m_FreeCount;
    6934  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    6935 }
    6936 
    6937 #if VMA_STATS_STRING_ENABLED
    6938 
    6939 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
    6940 {
    6941  PrintDetailedMap_Begin(json,
    6942  m_SumFreeSize, // unusedBytes
    6943  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
    6944  m_FreeCount); // unusedRangeCount
    6945 
    6946  size_t i = 0;
    6947  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    6948  suballocItem != m_Suballocations.cend();
    6949  ++suballocItem, ++i)
    6950  {
    6951  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    6952  {
    6953  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
    6954  }
    6955  else
    6956  {
    6957  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
    6958  }
    6959  }
    6960 
    6961  PrintDetailedMap_End(json);
    6962 }
    6963 
    6964 #endif // #if VMA_STATS_STRING_ENABLED
    6965 
    6966 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
    6967  uint32_t currentFrameIndex,
    6968  uint32_t frameInUseCount,
    6969  VkDeviceSize bufferImageGranularity,
    6970  VkDeviceSize allocSize,
    6971  VkDeviceSize allocAlignment,
    6972  bool upperAddress,
    6973  VmaSuballocationType allocType,
    6974  bool canMakeOtherLost,
    6975  uint32_t strategy,
    6976  VmaAllocationRequest* pAllocationRequest)
    6977 {
    6978  VMA_ASSERT(allocSize > 0);
    6979  VMA_ASSERT(!upperAddress);
    6980  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    6981  VMA_ASSERT(pAllocationRequest != VMA_NULL);
    6982  VMA_HEAVY_ASSERT(Validate());
    6983 
    6984  // There is not enough total free space in this block to fullfill the request: Early return.
    6985  if(canMakeOtherLost == false &&
    6986  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
    6987  {
    6988  return false;
    6989  }
    6990 
    6991  // New algorithm, efficiently searching freeSuballocationsBySize.
    6992  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
    6993  if(freeSuballocCount > 0)
    6994  {
    6996  {
    6997  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
    6998  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    6999  m_FreeSuballocationsBySize.data(),
    7000  m_FreeSuballocationsBySize.data() + freeSuballocCount,
    7001  allocSize + 2 * VMA_DEBUG_MARGIN,
    7002  VmaSuballocationItemSizeLess());
    7003  size_t index = it - m_FreeSuballocationsBySize.data();
    7004  for(; index < freeSuballocCount; ++index)
    7005  {
    7006  if(CheckAllocation(
    7007  currentFrameIndex,
    7008  frameInUseCount,
    7009  bufferImageGranularity,
    7010  allocSize,
    7011  allocAlignment,
    7012  allocType,
    7013  m_FreeSuballocationsBySize[index],
    7014  false, // canMakeOtherLost
    7015  &pAllocationRequest->offset,
    7016  &pAllocationRequest->itemsToMakeLostCount,
    7017  &pAllocationRequest->sumFreeSize,
    7018  &pAllocationRequest->sumItemSize))
    7019  {
    7020  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    7021  return true;
    7022  }
    7023  }
    7024  }
    7025  else // WORST_FIT, FIRST_FIT
    7026  {
    7027  // Search staring from biggest suballocations.
    7028  for(size_t index = freeSuballocCount; index--; )
    7029  {
    7030  if(CheckAllocation(
    7031  currentFrameIndex,
    7032  frameInUseCount,
    7033  bufferImageGranularity,
    7034  allocSize,
    7035  allocAlignment,
    7036  allocType,
    7037  m_FreeSuballocationsBySize[index],
    7038  false, // canMakeOtherLost
    7039  &pAllocationRequest->offset,
    7040  &pAllocationRequest->itemsToMakeLostCount,
    7041  &pAllocationRequest->sumFreeSize,
    7042  &pAllocationRequest->sumItemSize))
    7043  {
    7044  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    7045  return true;
    7046  }
    7047  }
    7048  }
    7049  }
    7050 
    7051  if(canMakeOtherLost)
    7052  {
    7053  // Brute-force algorithm. TODO: Come up with something better.
    7054 
    7055  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
    7056  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
    7057 
    7058  VmaAllocationRequest tmpAllocRequest = {};
    7059  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
    7060  suballocIt != m_Suballocations.end();
    7061  ++suballocIt)
    7062  {
    7063  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
    7064  suballocIt->hAllocation->CanBecomeLost())
    7065  {
    7066  if(CheckAllocation(
    7067  currentFrameIndex,
    7068  frameInUseCount,
    7069  bufferImageGranularity,
    7070  allocSize,
    7071  allocAlignment,
    7072  allocType,
    7073  suballocIt,
    7074  canMakeOtherLost,
    7075  &tmpAllocRequest.offset,
    7076  &tmpAllocRequest.itemsToMakeLostCount,
    7077  &tmpAllocRequest.sumFreeSize,
    7078  &tmpAllocRequest.sumItemSize))
    7079  {
    7080  tmpAllocRequest.item = suballocIt;
    7081 
    7082  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost() ||
    7084  {
    7085  *pAllocationRequest = tmpAllocRequest;
    7086  }
    7087  }
    7088  }
    7089  }
    7090 
    7091  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
    7092  {
    7093  return true;
    7094  }
    7095  }
    7096 
    7097  return false;
    7098 }
    7099 
/*
Makes lost the allocations counted in pAllocationRequest->itemsToMakeLostCount
(as computed by CheckAllocation with canMakeOtherLost), freeing their
suballocations so the requested allocation can later be placed at
pAllocationRequest->item. Returns false if any of them can no longer be made
lost, leaving the request partially applied.
*/
bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    while(pAllocationRequest->itemsToMakeLostCount > 0)
    {
        // Skip over a free item - only used items can be made lost.
        if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            ++pAllocationRequest->item;
        }
        VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
        VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
        VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
        if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            // FreeSuballocation may merge the freed item with free neighbors;
            // it returns the iterator of the resulting free item, which
            // becomes the new anchor of the request.
            pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
            --pAllocationRequest->itemsToMakeLostCount;
        }
        else
        {
            return false;
        }
    }

    // On success the request anchor must be a valid free suballocation.
    VMA_HEAVY_ASSERT(Validate());
    VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);

    return true;
}
    7131 
    7132 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    7133 {
    7134  uint32_t lostAllocationCount = 0;
    7135  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    7136  it != m_Suballocations.end();
    7137  ++it)
    7138  {
    7139  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
    7140  it->hAllocation->CanBecomeLost() &&
    7141  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    7142  {
    7143  it = FreeSuballocation(it);
    7144  ++lostAllocationCount;
    7145  }
    7146  }
    7147  return lostAllocationCount;
    7148 }
    7149 
    7150 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
    7151 {
    7152  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    7153  it != m_Suballocations.end();
    7154  ++it)
    7155  {
    7156  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
    7157  {
    7158  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
    7159  {
    7160  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    7161  return VK_ERROR_VALIDATION_FAILED_EXT;
    7162  }
    7163  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
    7164  {
    7165  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    7166  return VK_ERROR_VALIDATION_FAILED_EXT;
    7167  }
    7168  }
    7169  }
    7170 
    7171  return VK_SUCCESS;
    7172 }
    7173 
/*
Commits a previously validated allocation request: converts the free
suballocation at request.item into a used one of exactly allocSize at
request.offset, splitting off leftover free space before and/or after it as
new free suballocations. Ordering matters: the item must be unregistered
from the by-size index before its size is changed.
*/
void VmaBlockMetadata_Generic::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    VMA_ASSERT(!upperAddress);
    VMA_ASSERT(request.item != m_Suballocations.end());
    VmaSuballocation& suballoc = *request.item;
    // Given suballocation is a free block.
    VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    // Given offset is inside this suballocation.
    VMA_ASSERT(request.offset >= suballoc.offset);
    const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;

    // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    // it to become used.
    UnregisterFreeSuballocation(request.item);

    suballoc.offset = request.offset;
    suballoc.size = allocSize;
    suballoc.type = type;
    suballoc.hAllocation = hAllocation;

    // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    if(paddingEnd)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset + allocSize;
        paddingSuballoc.size = paddingEnd;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        VmaSuballocationList::iterator next = request.item;
        ++next;
        const VmaSuballocationList::iterator paddingEndItem =
            m_Suballocations.insert(next, paddingSuballoc);
        RegisterFreeSuballocation(paddingEndItem);
    }

    // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    if(paddingBegin)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset - paddingBegin;
        paddingSuballoc.size = paddingBegin;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        const VmaSuballocationList::iterator paddingBeginItem =
            m_Suballocations.insert(request.item, paddingSuballoc);
        RegisterFreeSuballocation(paddingBeginItem);
    }

    // Update totals.
    // One free range was consumed; each non-empty padding re-adds one.
    m_FreeCount = m_FreeCount - 1;
    if(paddingBegin > 0)
    {
        ++m_FreeCount;
    }
    if(paddingEnd > 0)
    {
        ++m_FreeCount;
    }
    // Paddings stay free, so only the allocated bytes leave the free total.
    m_SumFreeSize -= allocSize;
}
    7239 
    7240 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
    7241 {
    7242  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    7243  suballocItem != m_Suballocations.end();
    7244  ++suballocItem)
    7245  {
    7246  VmaSuballocation& suballoc = *suballocItem;
    7247  if(suballoc.hAllocation == allocation)
    7248  {
    7249  FreeSuballocation(suballocItem);
    7250  VMA_HEAVY_ASSERT(Validate());
    7251  return;
    7252  }
    7253  }
    7254  VMA_ASSERT(0 && "Not found!");
    7255 }
    7256 
    7257 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
    7258 {
    7259  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    7260  suballocItem != m_Suballocations.end();
    7261  ++suballocItem)
    7262  {
    7263  VmaSuballocation& suballoc = *suballocItem;
    7264  if(suballoc.offset == offset)
    7265  {
    7266  FreeSuballocation(suballocItem);
    7267  return;
    7268  }
    7269  }
    7270  VMA_ASSERT(0 && "Not found!");
    7271 }
    7272 
    7273 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
    7274 {
    7275  VkDeviceSize lastSize = 0;
    7276  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
    7277  {
    7278  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
    7279 
    7280  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
    7281  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    7282  VMA_VALIDATE(it->size >= lastSize);
    7283  lastSize = it->size;
    7284  }
    7285  return true;
    7286 }
    7287 
/*
Checks whether an allocation of allocSize/allocAlignment/allocType can start
inside the free range (or, with canMakeOtherLost, the range beginning) at
suballocItem, respecting VMA_DEBUG_MARGIN and bufferImageGranularity.

On success fills:
- *pOffset               - final aligned offset for the allocation,
- *itemsToMakeLostCount  - how many existing allocations must be made lost,
- *pSumFreeSize          - free bytes consumed by the candidate placement,
- *pSumItemSize          - bytes of allocations that would be made lost.

The two top-level branches are near-duplicates: the canMakeOtherLost path may
span multiple suballocations (free or lost-capable), the simple path must fit
entirely inside the single free suballocation.
*/
bool VmaBlockMetadata_Generic::CheckAllocation(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    VmaSuballocationList::const_iterator suballocItem,
    bool canMakeOtherLost,
    VkDeviceSize* pOffset,
    size_t* itemsToMakeLostCount,
    VkDeviceSize* pSumFreeSize,
    VkDeviceSize* pSumItemSize) const
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(suballocItem != m_Suballocations.cend());
    VMA_ASSERT(pOffset != VMA_NULL);

    *itemsToMakeLostCount = 0;
    *pSumFreeSize = 0;
    *pSumItemSize = 0;

    if(canMakeOtherLost)
    {
        // Account for the starting suballocation: free bytes, or an
        // allocation that is eligible to be made lost.
        if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            *pSumFreeSize = suballocItem->size;
        }
        else
        {
            if(suballocItem->hAllocation->CanBecomeLost() &&
                suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
            {
                ++*itemsToMakeLostCount;
                *pSumItemSize = suballocItem->size;
            }
            else
            {
                return false;
            }
        }

        // Remaining size is too small for this request: Early return.
        if(GetSize() - suballocItem->offset < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballocItem->offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                // Bump to the next granularity page to avoid the conflict.
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Now that we have final *pOffset, check if we are past suballocItem.
        // If yes, return false - this function should be called for another suballocItem as starting point.
        if(*pOffset >= suballocItem->offset + suballocItem->size)
        {
            return false;
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
        // Another early return check.
        if(suballocItem->offset + totalSize > GetSize())
        {
            return false;
        }

        // Advance lastSuballocItem until desired size is reached.
        // Update itemsToMakeLostCount.
        VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
        if(totalSize > suballocItem->size)
        {
            VkDeviceSize remainingSize = totalSize - suballocItem->size;
            while(remainingSize > 0)
            {
                ++lastSuballocItem;
                if(lastSuballocItem == m_Suballocations.cend())
                {
                    return false;
                }
                if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                {
                    *pSumFreeSize += lastSuballocItem->size;
                }
                else
                {
                    // A used item can only be crossed if it can be made lost.
                    VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
                    if(lastSuballocItem->hAllocation->CanBecomeLost() &&
                        lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                    {
                        ++*itemsToMakeLostCount;
                        *pSumItemSize += lastSuballocItem->size;
                    }
                    else
                    {
                        return false;
                    }
                }
                remainingSize = (lastSuballocItem->size < remainingSize) ?
                    remainingSize - lastSuballocItem->size : 0;
            }
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, we must mark more allocations lost or fail.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        // The conflicting neighbor must also be made lost.
                        VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
                        if(nextSuballoc.hAllocation->CanBecomeLost() &&
                            nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++*itemsToMakeLostCount;
                        }
                        else
                        {
                            return false;
                        }
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }
    else
    {
        // Simple path: must fit entirely in this one free suballocation.
        const VmaSuballocation& suballoc = *suballocItem;
        VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        *pSumFreeSize = suballoc.size;

        // Size of this suballocation is too small for this request: Early return.
        if(suballoc.size < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballoc.offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                // Bump to the next granularity page to avoid the conflict.
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        // Fail if requested size plus margin before and after is bigger than size of this suballocation.
        if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
        {
            return false;
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, allocation cannot be made here.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        return false;
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }

    // All tests passed: Success. pOffset is already filled.
    return true;
}
    7561 
    7562 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
    7563 {
    7564  VMA_ASSERT(item != m_Suballocations.end());
    7565  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    7566 
    7567  VmaSuballocationList::iterator nextItem = item;
    7568  ++nextItem;
    7569  VMA_ASSERT(nextItem != m_Suballocations.end());
    7570  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
    7571 
    7572  item->size += nextItem->size;
    7573  --m_FreeCount;
    7574  m_Suballocations.erase(nextItem);
    7575 }
    7576 
/*
Marks the given suballocation as free, merges it with free neighbors, and
keeps m_FreeSuballocationsBySize consistent. Returns the iterator of the
resulting free suballocation (the merged-into item when merging with the
previous one occurred). Ordering is critical: neighbors must be unregistered
from the by-size index before their sizes change via merging.
*/
VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
{
    // Change this suballocation to be marked as free.
    VmaSuballocation& suballoc = *suballocItem;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    suballoc.hAllocation = VK_NULL_HANDLE;

    // Update totals.
    ++m_FreeCount;
    m_SumFreeSize += suballoc.size;

    // Merge with previous and/or next suballocation if it's also free.
    bool mergeWithNext = false;
    bool mergeWithPrev = false;

    VmaSuballocationList::iterator nextItem = suballocItem;
    ++nextItem;
    if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    {
        mergeWithNext = true;
    }

    VmaSuballocationList::iterator prevItem = suballocItem;
    if(suballocItem != m_Suballocations.begin())
    {
        --prevItem;
        if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            mergeWithPrev = true;
        }
    }

    if(mergeWithNext)
    {
        // Remove the next item from the by-size index before it is erased.
        UnregisterFreeSuballocation(nextItem);
        MergeFreeWithNext(suballocItem);
    }

    if(mergeWithPrev)
    {
        // The previous item changes size, so it must be re-registered.
        UnregisterFreeSuballocation(prevItem);
        MergeFreeWithNext(prevItem);
        RegisterFreeSuballocation(prevItem);
        return prevItem;
    }
    else
    {
        RegisterFreeSuballocation(suballocItem);
        return suballocItem;
    }
}
    7628 
    7629 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
    7630 {
    7631  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    7632  VMA_ASSERT(item->size > 0);
    7633 
    7634  // You may want to enable this validation at the beginning or at the end of
    7635  // this function, depending on what do you want to check.
    7636  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    7637 
    7638  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    7639  {
    7640  if(m_FreeSuballocationsBySize.empty())
    7641  {
    7642  m_FreeSuballocationsBySize.push_back(item);
    7643  }
    7644  else
    7645  {
    7646  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
    7647  }
    7648  }
    7649 
    7650  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    7651 }
    7652 
    7653 
    7654 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
    7655 {
    7656  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    7657  VMA_ASSERT(item->size > 0);
    7658 
    7659  // You may want to enable this validation at the beginning or at the end of
    7660  // this function, depending on what do you want to check.
    7661  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    7662 
    7663  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    7664  {
    7665  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    7666  m_FreeSuballocationsBySize.data(),
    7667  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
    7668  item,
    7669  VmaSuballocationItemSizeLess());
    7670  for(size_t index = it - m_FreeSuballocationsBySize.data();
    7671  index < m_FreeSuballocationsBySize.size();
    7672  ++index)
    7673  {
    7674  if(m_FreeSuballocationsBySize[index] == item)
    7675  {
    7676  VmaVectorRemove(m_FreeSuballocationsBySize, index);
    7677  return;
    7678  }
    7679  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
    7680  }
    7681  VMA_ASSERT(0 && "Not found.");
    7682  }
    7683 
    7684  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    7685 }
    7686 
    7688 // class VmaBlockMetadata_Linear
    7689 
// Constructs empty linear metadata. Both suballocation vectors allocate
// through the owning allocator's host allocation callbacks.
VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_SumFreeSize(0),
    m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_1stVectorIndex(0), // which of the two vectors currently acts as "1st"
    m_2ndVectorMode(SECOND_VECTOR_EMPTY),
    m_1stNullItemsBeginCount(0),
    m_1stNullItemsMiddleCount(0),
    m_2ndNullItemsCount(0)
{
}
    7702 
// Trivial destructor - the suballocation vectors release their storage
// through their VmaStlAllocator.
VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
{
}
    7706 
// Initializes metadata for a block of the given size.
// The whole block starts out as free space.
void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);
    m_SumFreeSize = size;
}
    7712 
    7713 bool VmaBlockMetadata_Linear::Validate() const
    7714 {
    7715  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    7716  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    7717 
    7718  VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
    7719  VMA_VALIDATE(!suballocations1st.empty() ||
    7720  suballocations2nd.empty() ||
    7721  m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
    7722 
    7723  if(!suballocations1st.empty())
    7724  {
    7725  // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
    7726  VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
    7727  // Null item at the end should be just pop_back().
    7728  VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
    7729  }
    7730  if(!suballocations2nd.empty())
    7731  {
    7732  // Null item at the end should be just pop_back().
    7733  VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
    7734  }
    7735 
    7736  VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
    7737  VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
    7738 
    7739  VkDeviceSize sumUsedSize = 0;
    7740  const size_t suballoc1stCount = suballocations1st.size();
    7741  VkDeviceSize offset = VMA_DEBUG_MARGIN;
    7742 
    7743  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    7744  {
    7745  const size_t suballoc2ndCount = suballocations2nd.size();
    7746  size_t nullItem2ndCount = 0;
    7747  for(size_t i = 0; i < suballoc2ndCount; ++i)
    7748  {
    7749  const VmaSuballocation& suballoc = suballocations2nd[i];
    7750  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    7751 
    7752  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
    7753  VMA_VALIDATE(suballoc.offset >= offset);
    7754 
    7755  if(!currFree)
    7756  {
    7757  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
    7758  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
    7759  sumUsedSize += suballoc.size;
    7760  }
    7761  else
    7762  {
    7763  ++nullItem2ndCount;
    7764  }
    7765 
    7766  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    7767  }
    7768 
    7769  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    7770  }
    7771 
    7772  for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
    7773  {
    7774  const VmaSuballocation& suballoc = suballocations1st[i];
    7775  VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
    7776  suballoc.hAllocation == VK_NULL_HANDLE);
    7777  }
    7778 
    7779  size_t nullItem1stCount = m_1stNullItemsBeginCount;
    7780 
    7781  for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
    7782  {
    7783  const VmaSuballocation& suballoc = suballocations1st[i];
    7784  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    7785 
    7786  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
    7787  VMA_VALIDATE(suballoc.offset >= offset);
    7788  VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
    7789 
    7790  if(!currFree)
    7791  {
    7792  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
    7793  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
    7794  sumUsedSize += suballoc.size;
    7795  }
    7796  else
    7797  {
    7798  ++nullItem1stCount;
    7799  }
    7800 
    7801  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    7802  }
    7803  VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
    7804 
    7805  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    7806  {
    7807  const size_t suballoc2ndCount = suballocations2nd.size();
    7808  size_t nullItem2ndCount = 0;
    7809  for(size_t i = suballoc2ndCount; i--; )
    7810  {
    7811  const VmaSuballocation& suballoc = suballocations2nd[i];
    7812  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    7813 
    7814  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
    7815  VMA_VALIDATE(suballoc.offset >= offset);
    7816 
    7817  if(!currFree)
    7818  {
    7819  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
    7820  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
    7821  sumUsedSize += suballoc.size;
    7822  }
    7823  else
    7824  {
    7825  ++nullItem2ndCount;
    7826  }
    7827 
    7828  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    7829  }
    7830 
    7831  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    7832  }
    7833 
    7834  VMA_VALIDATE(offset <= GetSize());
    7835  VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
    7836 
    7837  return true;
    7838 }
    7839 
    7840 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
    7841 {
    7842  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
    7843  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
    7844 }
    7845 
    7846 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
    7847 {
    7848  const VkDeviceSize size = GetSize();
    7849 
    7850  /*
    7851  We don't consider gaps inside allocation vectors with freed allocations because
    7852  they are not suitable for reuse in linear allocator. We consider only space that
    7853  is available for new allocations.
    7854  */
    7855  if(IsEmpty())
    7856  {
    7857  return size;
    7858  }
    7859 
    7860  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    7861 
    7862  switch(m_2ndVectorMode)
    7863  {
    7864  case SECOND_VECTOR_EMPTY:
    7865  /*
    7866  Available space is after end of 1st, as well as before beginning of 1st (which
    7867  whould make it a ring buffer).
    7868  */
    7869  {
    7870  const size_t suballocations1stCount = suballocations1st.size();
    7871  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
    7872  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
    7873  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
    7874  return VMA_MAX(
    7875  firstSuballoc.offset,
    7876  size - (lastSuballoc.offset + lastSuballoc.size));
    7877  }
    7878  break;
    7879 
    7880  case SECOND_VECTOR_RING_BUFFER:
    7881  /*
    7882  Available space is only between end of 2nd and beginning of 1st.
    7883  */
    7884  {
    7885  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    7886  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
    7887  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
    7888  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
    7889  }
    7890  break;
    7891 
    7892  case SECOND_VECTOR_DOUBLE_STACK:
    7893  /*
    7894  Available space is only between end of 1st and top of 2nd.
    7895  */
    7896  {
    7897  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    7898  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
    7899  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
    7900  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
    7901  }
    7902  break;
    7903 
    7904  default:
    7905  VMA_ASSERT(0);
    7906  return 0;
    7907  }
    7908 }
    7909 
    7910 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    7911 {
    7912  const VkDeviceSize size = GetSize();
    7913  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    7914  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    7915  const size_t suballoc1stCount = suballocations1st.size();
    7916  const size_t suballoc2ndCount = suballocations2nd.size();
    7917 
    7918  outInfo.blockCount = 1;
    7919  outInfo.allocationCount = (uint32_t)GetAllocationCount();
    7920  outInfo.unusedRangeCount = 0;
    7921  outInfo.usedBytes = 0;
    7922  outInfo.allocationSizeMin = UINT64_MAX;
    7923  outInfo.allocationSizeMax = 0;
    7924  outInfo.unusedRangeSizeMin = UINT64_MAX;
    7925  outInfo.unusedRangeSizeMax = 0;
    7926 
    7927  VkDeviceSize lastOffset = 0;
    7928 
    7929  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    7930  {
    7931  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    7932  size_t nextAlloc2ndIndex = 0;
    7933  while(lastOffset < freeSpace2ndTo1stEnd)
    7934  {
    7935  // Find next non-null allocation or move nextAllocIndex to the end.
    7936  while(nextAlloc2ndIndex < suballoc2ndCount &&
    7937  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    7938  {
    7939  ++nextAlloc2ndIndex;
    7940  }
    7941 
    7942  // Found non-null allocation.
    7943  if(nextAlloc2ndIndex < suballoc2ndCount)
    7944  {
    7945  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    7946 
    7947  // 1. Process free space before this allocation.
    7948  if(lastOffset < suballoc.offset)
    7949  {
    7950  // There is free space from lastOffset to suballoc.offset.
    7951  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    7952  ++outInfo.unusedRangeCount;
    7953  outInfo.unusedBytes += unusedRangeSize;
    7954  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    7955  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    7956  }
    7957 
    7958  // 2. Process this allocation.
    7959  // There is allocation with suballoc.offset, suballoc.size.
    7960  outInfo.usedBytes += suballoc.size;
    7961  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    7962  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    7963 
    7964  // 3. Prepare for next iteration.
    7965  lastOffset = suballoc.offset + suballoc.size;
    7966  ++nextAlloc2ndIndex;
    7967  }
    7968  // We are at the end.
    7969  else
    7970  {
    7971  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    7972  if(lastOffset < freeSpace2ndTo1stEnd)
    7973  {
    7974  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    7975  ++outInfo.unusedRangeCount;
    7976  outInfo.unusedBytes += unusedRangeSize;
    7977  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    7978  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    7979  }
    7980 
    7981  // End of loop.
    7982  lastOffset = freeSpace2ndTo1stEnd;
    7983  }
    7984  }
    7985  }
    7986 
    7987  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    7988  const VkDeviceSize freeSpace1stTo2ndEnd =
    7989  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    7990  while(lastOffset < freeSpace1stTo2ndEnd)
    7991  {
    7992  // Find next non-null allocation or move nextAllocIndex to the end.
    7993  while(nextAlloc1stIndex < suballoc1stCount &&
    7994  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    7995  {
    7996  ++nextAlloc1stIndex;
    7997  }
    7998 
    7999  // Found non-null allocation.
    8000  if(nextAlloc1stIndex < suballoc1stCount)
    8001  {
    8002  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8003 
    8004  // 1. Process free space before this allocation.
    8005  if(lastOffset < suballoc.offset)
    8006  {
    8007  // There is free space from lastOffset to suballoc.offset.
    8008  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8009  ++outInfo.unusedRangeCount;
    8010  outInfo.unusedBytes += unusedRangeSize;
    8011  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8012  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8013  }
    8014 
    8015  // 2. Process this allocation.
    8016  // There is allocation with suballoc.offset, suballoc.size.
    8017  outInfo.usedBytes += suballoc.size;
    8018  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    8019  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    8020 
    8021  // 3. Prepare for next iteration.
    8022  lastOffset = suballoc.offset + suballoc.size;
    8023  ++nextAlloc1stIndex;
    8024  }
    8025  // We are at the end.
    8026  else
    8027  {
    8028  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8029  if(lastOffset < freeSpace1stTo2ndEnd)
    8030  {
    8031  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    8032  ++outInfo.unusedRangeCount;
    8033  outInfo.unusedBytes += unusedRangeSize;
    8034  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8035  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8036  }
    8037 
    8038  // End of loop.
    8039  lastOffset = freeSpace1stTo2ndEnd;
    8040  }
    8041  }
    8042 
    8043  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8044  {
    8045  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8046  while(lastOffset < size)
    8047  {
    8048  // Find next non-null allocation or move nextAllocIndex to the end.
    8049  while(nextAlloc2ndIndex != SIZE_MAX &&
    8050  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8051  {
    8052  --nextAlloc2ndIndex;
    8053  }
    8054 
    8055  // Found non-null allocation.
    8056  if(nextAlloc2ndIndex != SIZE_MAX)
    8057  {
    8058  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8059 
    8060  // 1. Process free space before this allocation.
    8061  if(lastOffset < suballoc.offset)
    8062  {
    8063  // There is free space from lastOffset to suballoc.offset.
    8064  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8065  ++outInfo.unusedRangeCount;
    8066  outInfo.unusedBytes += unusedRangeSize;
    8067  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8068  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8069  }
    8070 
    8071  // 2. Process this allocation.
    8072  // There is allocation with suballoc.offset, suballoc.size.
    8073  outInfo.usedBytes += suballoc.size;
    8074  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    8075  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    8076 
    8077  // 3. Prepare for next iteration.
    8078  lastOffset = suballoc.offset + suballoc.size;
    8079  --nextAlloc2ndIndex;
    8080  }
    8081  // We are at the end.
    8082  else
    8083  {
    8084  // There is free space from lastOffset to size.
    8085  if(lastOffset < size)
    8086  {
    8087  const VkDeviceSize unusedRangeSize = size - lastOffset;
    8088  ++outInfo.unusedRangeCount;
    8089  outInfo.unusedBytes += unusedRangeSize;
    8090  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8091  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8092  }
    8093 
    8094  // End of loop.
    8095  lastOffset = size;
    8096  }
    8097  }
    8098  }
    8099 
    8100  outInfo.unusedBytes = size - outInfo.usedBytes;
    8101 }
    8102 
    8103 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
    8104 {
    8105  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8106  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8107  const VkDeviceSize size = GetSize();
    8108  const size_t suballoc1stCount = suballocations1st.size();
    8109  const size_t suballoc2ndCount = suballocations2nd.size();
    8110 
    8111  inoutStats.size += size;
    8112 
    8113  VkDeviceSize lastOffset = 0;
    8114 
    8115  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8116  {
    8117  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8118  size_t nextAlloc2ndIndex = m_1stNullItemsBeginCount;
    8119  while(lastOffset < freeSpace2ndTo1stEnd)
    8120  {
    8121  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8122  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8123  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8124  {
    8125  ++nextAlloc2ndIndex;
    8126  }
    8127 
    8128  // Found non-null allocation.
    8129  if(nextAlloc2ndIndex < suballoc2ndCount)
    8130  {
    8131  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8132 
    8133  // 1. Process free space before this allocation.
    8134  if(lastOffset < suballoc.offset)
    8135  {
    8136  // There is free space from lastOffset to suballoc.offset.
    8137  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8138  inoutStats.unusedSize += unusedRangeSize;
    8139  ++inoutStats.unusedRangeCount;
    8140  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8141  }
    8142 
    8143  // 2. Process this allocation.
    8144  // There is allocation with suballoc.offset, suballoc.size.
    8145  ++inoutStats.allocationCount;
    8146 
    8147  // 3. Prepare for next iteration.
    8148  lastOffset = suballoc.offset + suballoc.size;
    8149  ++nextAlloc2ndIndex;
    8150  }
    8151  // We are at the end.
    8152  else
    8153  {
    8154  if(lastOffset < freeSpace2ndTo1stEnd)
    8155  {
    8156  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8157  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    8158  inoutStats.unusedSize += unusedRangeSize;
    8159  ++inoutStats.unusedRangeCount;
    8160  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8161  }
    8162 
    8163  // End of loop.
    8164  lastOffset = freeSpace2ndTo1stEnd;
    8165  }
    8166  }
    8167  }
    8168 
    8169  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8170  const VkDeviceSize freeSpace1stTo2ndEnd =
    8171  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    8172  while(lastOffset < freeSpace1stTo2ndEnd)
    8173  {
    8174  // Find next non-null allocation or move nextAllocIndex to the end.
    8175  while(nextAlloc1stIndex < suballoc1stCount &&
    8176  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8177  {
    8178  ++nextAlloc1stIndex;
    8179  }
    8180 
    8181  // Found non-null allocation.
    8182  if(nextAlloc1stIndex < suballoc1stCount)
    8183  {
    8184  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8185 
    8186  // 1. Process free space before this allocation.
    8187  if(lastOffset < suballoc.offset)
    8188  {
    8189  // There is free space from lastOffset to suballoc.offset.
    8190  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8191  inoutStats.unusedSize += unusedRangeSize;
    8192  ++inoutStats.unusedRangeCount;
    8193  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8194  }
    8195 
    8196  // 2. Process this allocation.
    8197  // There is allocation with suballoc.offset, suballoc.size.
    8198  ++inoutStats.allocationCount;
    8199 
    8200  // 3. Prepare for next iteration.
    8201  lastOffset = suballoc.offset + suballoc.size;
    8202  ++nextAlloc1stIndex;
    8203  }
    8204  // We are at the end.
    8205  else
    8206  {
    8207  if(lastOffset < freeSpace1stTo2ndEnd)
    8208  {
    8209  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8210  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    8211  inoutStats.unusedSize += unusedRangeSize;
    8212  ++inoutStats.unusedRangeCount;
    8213  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8214  }
    8215 
    8216  // End of loop.
    8217  lastOffset = freeSpace1stTo2ndEnd;
    8218  }
    8219  }
    8220 
    8221  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8222  {
    8223  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8224  while(lastOffset < size)
    8225  {
    8226  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8227  while(nextAlloc2ndIndex != SIZE_MAX &&
    8228  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8229  {
    8230  --nextAlloc2ndIndex;
    8231  }
    8232 
    8233  // Found non-null allocation.
    8234  if(nextAlloc2ndIndex != SIZE_MAX)
    8235  {
    8236  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8237 
    8238  // 1. Process free space before this allocation.
    8239  if(lastOffset < suballoc.offset)
    8240  {
    8241  // There is free space from lastOffset to suballoc.offset.
    8242  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8243  inoutStats.unusedSize += unusedRangeSize;
    8244  ++inoutStats.unusedRangeCount;
    8245  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8246  }
    8247 
    8248  // 2. Process this allocation.
    8249  // There is allocation with suballoc.offset, suballoc.size.
    8250  ++inoutStats.allocationCount;
    8251 
    8252  // 3. Prepare for next iteration.
    8253  lastOffset = suballoc.offset + suballoc.size;
    8254  --nextAlloc2ndIndex;
    8255  }
    8256  // We are at the end.
    8257  else
    8258  {
    8259  if(lastOffset < size)
    8260  {
    8261  // There is free space from lastOffset to size.
    8262  const VkDeviceSize unusedRangeSize = size - lastOffset;
    8263  inoutStats.unusedSize += unusedRangeSize;
    8264  ++inoutStats.unusedRangeCount;
    8265  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8266  }
    8267 
    8268  // End of loop.
    8269  lastOffset = size;
    8270  }
    8271  }
    8272  }
    8273 }
    8274 
    8275 #if VMA_STATS_STRING_ENABLED
    8276 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
    8277 {
    8278  const VkDeviceSize size = GetSize();
    8279  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8280  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8281  const size_t suballoc1stCount = suballocations1st.size();
    8282  const size_t suballoc2ndCount = suballocations2nd.size();
    8283 
    8284  // FIRST PASS
    8285 
    8286  size_t unusedRangeCount = 0;
    8287  VkDeviceSize usedBytes = 0;
    8288 
    8289  VkDeviceSize lastOffset = 0;
    8290 
    8291  size_t alloc2ndCount = 0;
    8292  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8293  {
    8294  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8295  size_t nextAlloc2ndIndex = 0;
    8296  while(lastOffset < freeSpace2ndTo1stEnd)
    8297  {
    8298  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8299  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8300  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8301  {
    8302  ++nextAlloc2ndIndex;
    8303  }
    8304 
    8305  // Found non-null allocation.
    8306  if(nextAlloc2ndIndex < suballoc2ndCount)
    8307  {
    8308  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8309 
    8310  // 1. Process free space before this allocation.
    8311  if(lastOffset < suballoc.offset)
    8312  {
    8313  // There is free space from lastOffset to suballoc.offset.
    8314  ++unusedRangeCount;
    8315  }
    8316 
    8317  // 2. Process this allocation.
    8318  // There is allocation with suballoc.offset, suballoc.size.
    8319  ++alloc2ndCount;
    8320  usedBytes += suballoc.size;
    8321 
    8322  // 3. Prepare for next iteration.
    8323  lastOffset = suballoc.offset + suballoc.size;
    8324  ++nextAlloc2ndIndex;
    8325  }
    8326  // We are at the end.
    8327  else
    8328  {
    8329  if(lastOffset < freeSpace2ndTo1stEnd)
    8330  {
    8331  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8332  ++unusedRangeCount;
    8333  }
    8334 
    8335  // End of loop.
    8336  lastOffset = freeSpace2ndTo1stEnd;
    8337  }
    8338  }
    8339  }
    8340 
    8341  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8342  size_t alloc1stCount = 0;
    8343  const VkDeviceSize freeSpace1stTo2ndEnd =
    8344  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    8345  while(lastOffset < freeSpace1stTo2ndEnd)
    8346  {
    8347  // Find next non-null allocation or move nextAllocIndex to the end.
    8348  while(nextAlloc1stIndex < suballoc1stCount &&
    8349  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8350  {
    8351  ++nextAlloc1stIndex;
    8352  }
    8353 
    8354  // Found non-null allocation.
    8355  if(nextAlloc1stIndex < suballoc1stCount)
    8356  {
    8357  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8358 
    8359  // 1. Process free space before this allocation.
    8360  if(lastOffset < suballoc.offset)
    8361  {
    8362  // There is free space from lastOffset to suballoc.offset.
    8363  ++unusedRangeCount;
    8364  }
    8365 
    8366  // 2. Process this allocation.
    8367  // There is allocation with suballoc.offset, suballoc.size.
    8368  ++alloc1stCount;
    8369  usedBytes += suballoc.size;
    8370 
    8371  // 3. Prepare for next iteration.
    8372  lastOffset = suballoc.offset + suballoc.size;
    8373  ++nextAlloc1stIndex;
    8374  }
    8375  // We are at the end.
    8376  else
    8377  {
    8378  if(lastOffset < size)
    8379  {
    8380  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8381  ++unusedRangeCount;
    8382  }
    8383 
    8384  // End of loop.
    8385  lastOffset = freeSpace1stTo2ndEnd;
    8386  }
    8387  }
    8388 
    8389  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8390  {
    8391  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8392  while(lastOffset < size)
    8393  {
    8394  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8395  while(nextAlloc2ndIndex != SIZE_MAX &&
    8396  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8397  {
    8398  --nextAlloc2ndIndex;
    8399  }
    8400 
    8401  // Found non-null allocation.
    8402  if(nextAlloc2ndIndex != SIZE_MAX)
    8403  {
    8404  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8405 
    8406  // 1. Process free space before this allocation.
    8407  if(lastOffset < suballoc.offset)
    8408  {
    8409  // There is free space from lastOffset to suballoc.offset.
    8410  ++unusedRangeCount;
    8411  }
    8412 
    8413  // 2. Process this allocation.
    8414  // There is allocation with suballoc.offset, suballoc.size.
    8415  ++alloc2ndCount;
    8416  usedBytes += suballoc.size;
    8417 
    8418  // 3. Prepare for next iteration.
    8419  lastOffset = suballoc.offset + suballoc.size;
    8420  --nextAlloc2ndIndex;
    8421  }
    8422  // We are at the end.
    8423  else
    8424  {
    8425  if(lastOffset < size)
    8426  {
    8427  // There is free space from lastOffset to size.
    8428  ++unusedRangeCount;
    8429  }
    8430 
    8431  // End of loop.
    8432  lastOffset = size;
    8433  }
    8434  }
    8435  }
    8436 
    8437  const VkDeviceSize unusedBytes = size - usedBytes;
    8438  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
    8439 
    8440  // SECOND PASS
    8441  lastOffset = 0;
    8442 
    8443  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8444  {
    8445  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8446  size_t nextAlloc2ndIndex = 0;
    8447  while(lastOffset < freeSpace2ndTo1stEnd)
    8448  {
    8449  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8450  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8451  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8452  {
    8453  ++nextAlloc2ndIndex;
    8454  }
    8455 
    8456  // Found non-null allocation.
    8457  if(nextAlloc2ndIndex < suballoc2ndCount)
    8458  {
    8459  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8460 
    8461  // 1. Process free space before this allocation.
    8462  if(lastOffset < suballoc.offset)
    8463  {
    8464  // There is free space from lastOffset to suballoc.offset.
    8465  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8466  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8467  }
    8468 
    8469  // 2. Process this allocation.
    8470  // There is allocation with suballoc.offset, suballoc.size.
    8471  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    8472 
    8473  // 3. Prepare for next iteration.
    8474  lastOffset = suballoc.offset + suballoc.size;
    8475  ++nextAlloc2ndIndex;
    8476  }
    8477  // We are at the end.
    8478  else
    8479  {
    8480  if(lastOffset < freeSpace2ndTo1stEnd)
    8481  {
    8482  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8483  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    8484  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8485  }
    8486 
    8487  // End of loop.
    8488  lastOffset = freeSpace2ndTo1stEnd;
    8489  }
    8490  }
    8491  }
    8492 
    8493  nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8494  while(lastOffset < freeSpace1stTo2ndEnd)
    8495  {
    8496  // Find next non-null allocation or move nextAllocIndex to the end.
    8497  while(nextAlloc1stIndex < suballoc1stCount &&
    8498  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8499  {
    8500  ++nextAlloc1stIndex;
    8501  }
    8502 
    8503  // Found non-null allocation.
    8504  if(nextAlloc1stIndex < suballoc1stCount)
    8505  {
    8506  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8507 
    8508  // 1. Process free space before this allocation.
    8509  if(lastOffset < suballoc.offset)
    8510  {
    8511  // There is free space from lastOffset to suballoc.offset.
    8512  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8513  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8514  }
    8515 
    8516  // 2. Process this allocation.
    8517  // There is allocation with suballoc.offset, suballoc.size.
    8518  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    8519 
    8520  // 3. Prepare for next iteration.
    8521  lastOffset = suballoc.offset + suballoc.size;
    8522  ++nextAlloc1stIndex;
    8523  }
    8524  // We are at the end.
    8525  else
    8526  {
    8527  if(lastOffset < freeSpace1stTo2ndEnd)
    8528  {
    8529  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8530  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    8531  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8532  }
    8533 
    8534  // End of loop.
    8535  lastOffset = freeSpace1stTo2ndEnd;
    8536  }
    8537  }
    8538 
    8539  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8540  {
    8541  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8542  while(lastOffset < size)
    8543  {
    8544  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8545  while(nextAlloc2ndIndex != SIZE_MAX &&
    8546  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8547  {
    8548  --nextAlloc2ndIndex;
    8549  }
    8550 
    8551  // Found non-null allocation.
    8552  if(nextAlloc2ndIndex != SIZE_MAX)
    8553  {
    8554  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8555 
    8556  // 1. Process free space before this allocation.
    8557  if(lastOffset < suballoc.offset)
    8558  {
    8559  // There is free space from lastOffset to suballoc.offset.
    8560  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8561  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8562  }
    8563 
    8564  // 2. Process this allocation.
    8565  // There is allocation with suballoc.offset, suballoc.size.
    8566  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    8567 
    8568  // 3. Prepare for next iteration.
    8569  lastOffset = suballoc.offset + suballoc.size;
    8570  --nextAlloc2ndIndex;
    8571  }
    8572  // We are at the end.
    8573  else
    8574  {
    8575  if(lastOffset < size)
    8576  {
    8577  // There is free space from lastOffset to size.
    8578  const VkDeviceSize unusedRangeSize = size - lastOffset;
    8579  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8580  }
    8581 
    8582  // End of loop.
    8583  lastOffset = size;
    8584  }
    8585  }
    8586  }
    8587 
    8588  PrintDetailedMap_End(json);
    8589 }
    8590 #endif // #if VMA_STATS_STRING_ENABLED
    8591 
/*
Tries to find a place for a new allocation of allocSize bytes with
allocAlignment, honoring bufferImageGranularity and VMA_DEBUG_MARGIN.
On success fills *pAllocationRequest and returns true; the metadata itself is
not modified (Alloc() commits the request later).

- upperAddress = true: allocate from the top of the block (double-stack mode,
  grows the 2nd vector downwards).
- upperAddress = false: first try the end of the 1st vector, then wrap around
  to the 2nd vector in ring-buffer mode, optionally making colliding
  allocations lost when canMakeOtherLost is set.
- `strategy` is accepted for interface compatibility with other metadata
  implementations but is not used by the linear algorithm.
*/
bool VmaBlockMetadata_Linear::CreateAllocationRequest(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    bool upperAddress,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(pAllocationRequest != VMA_NULL);
    VMA_HEAVY_ASSERT(Validate());

    const VkDeviceSize size = GetSize();
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(upperAddress)
    {
        // Upper-address allocations are only valid in double-stack usage.
        if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
        {
            VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
            return false;
        }

        // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
        if(allocSize > size)
        {
            return false;
        }
        VkDeviceSize resultBaseOffset = size - allocSize;
        if(!suballocations2nd.empty())
        {
            const VmaSuballocation& lastSuballoc = suballocations2nd.back();
            resultBaseOffset = lastSuballoc.offset - allocSize;
            if(allocSize > lastSuballoc.offset)
            {
                return false;
            }
        }

        // Start from offset equal to end of free space.
        VkDeviceSize resultOffset = resultBaseOffset;

        // Apply VMA_DEBUG_MARGIN at the end.
        if(VMA_DEBUG_MARGIN > 0)
        {
            if(resultOffset < VMA_DEBUG_MARGIN)
            {
                return false;
            }
            resultOffset -= VMA_DEBUG_MARGIN;
        }

        // Apply alignment. Growing downwards, so align DOWN.
        resultOffset = VmaAlignDown(resultOffset, allocAlignment);

        // Check next suballocations from 2nd for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1 && !suballocations2nd.empty())
        {
            bool bufferImageGranularityConflict = false;
            for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
            {
                const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
                if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
            }
        }

        // There is enough free space.
        const VkDeviceSize endOf1st = !suballocations1st.empty() ?
            suballocations1st.back().offset + suballocations1st.back().size :
            0;
        if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
        {
            // Check previous suballocations for BufferImageGranularity conflicts.
            // If conflict exists, allocation cannot be made here.
            if(bufferImageGranularity > 1)
            {
                for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
                {
                    const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
                    if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
                        {
                            return false;
                        }
                    }
                    else
                    {
                        // Already on next page.
                        break;
                    }
                }
            }

            // All tests passed: Success.
            pAllocationRequest->offset = resultOffset;
            pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
            pAllocationRequest->sumItemSize = 0;
            // pAllocationRequest->item unused.
            pAllocationRequest->itemsToMakeLostCount = 0;
            return true;
        }
    }
    else // !upperAddress
    {
        if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
        {
            // Try to allocate at the end of 1st vector.

            VkDeviceSize resultBaseOffset = 0;
            if(!suballocations1st.empty())
            {
                const VmaSuballocation& lastSuballoc = suballocations1st.back();
                resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
            }

            // Start from offset equal to beginning of free space.
            VkDeviceSize resultOffset = resultBaseOffset;

            // Apply VMA_DEBUG_MARGIN at the beginning.
            if(VMA_DEBUG_MARGIN > 0)
            {
                resultOffset += VMA_DEBUG_MARGIN;
            }

            // Apply alignment. Growing upwards, so align UP.
            resultOffset = VmaAlignUp(resultOffset, allocAlignment);

            // Check previous suballocations for BufferImageGranularity conflicts.
            // Make bigger alignment if necessary.
            if(bufferImageGranularity > 1 && !suballocations1st.empty())
            {
                bool bufferImageGranularityConflict = false;
                for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
                {
                    const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
                    if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                        {
                            bufferImageGranularityConflict = true;
                            break;
                        }
                    }
                    else
                        // Already on previous page.
                        break;
                }
                if(bufferImageGranularityConflict)
                {
                    resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
                }
            }

            // In double-stack mode free space ends where the top stack begins.
            const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
                suballocations2nd.back().offset : size;

            // There is enough free space at the end after alignment.
            if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
            {
                // Check next suballocations for BufferImageGranularity conflicts.
                // If conflict exists, allocation cannot be made here.
                if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
                {
                    for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
                    {
                        const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
                        if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                        {
                            if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                            {
                                return false;
                            }
                        }
                        else
                        {
                            // Already on previous page.
                            break;
                        }
                    }
                }

                // All tests passed: Success.
                pAllocationRequest->offset = resultOffset;
                pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
                pAllocationRequest->sumItemSize = 0;
                // pAllocationRequest->item unused.
                pAllocationRequest->itemsToMakeLostCount = 0;
                return true;
            }
        }

        // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
        // beginning of 1st vector as the end of free space.
        if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
        {
            VMA_ASSERT(!suballocations1st.empty());

            VkDeviceSize resultBaseOffset = 0;
            if(!suballocations2nd.empty())
            {
                const VmaSuballocation& lastSuballoc = suballocations2nd.back();
                resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
            }

            // Start from offset equal to beginning of free space.
            VkDeviceSize resultOffset = resultBaseOffset;

            // Apply VMA_DEBUG_MARGIN at the beginning.
            if(VMA_DEBUG_MARGIN > 0)
            {
                resultOffset += VMA_DEBUG_MARGIN;
            }

            // Apply alignment.
            resultOffset = VmaAlignUp(resultOffset, allocAlignment);

            // Check previous suballocations for BufferImageGranularity conflicts.
            // Make bigger alignment if necessary.
            if(bufferImageGranularity > 1 && !suballocations2nd.empty())
            {
                bool bufferImageGranularityConflict = false;
                for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
                {
                    const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
                    if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                        {
                            bufferImageGranularityConflict = true;
                            break;
                        }
                    }
                    else
                        // Already on previous page.
                        break;
                }
                if(bufferImageGranularityConflict)
                {
                    resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
                }
            }

            pAllocationRequest->itemsToMakeLostCount = 0;
            pAllocationRequest->sumItemSize = 0;
            size_t index1st = m_1stNullItemsBeginCount;

            if(canMakeOtherLost)
            {
                // Collect allocations at the front of 1st vector that the new
                // allocation would overlap, counting them as to-be-made-lost.
                while(index1st < suballocations1st.size() &&
                    resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
                {
                    // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
                    const VmaSuballocation& suballoc = suballocations1st[index1st];
                    if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
                    {
                        // No problem.
                    }
                    else
                    {
                        VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
                        if(suballoc.hAllocation->CanBecomeLost() &&
                            suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++pAllocationRequest->itemsToMakeLostCount;
                            pAllocationRequest->sumItemSize += suballoc.size;
                        }
                        else
                        {
                            return false;
                        }
                    }
                    ++index1st;
                }

                // Check next suballocations for BufferImageGranularity conflicts.
                // If conflict exists, we must mark more allocations lost or fail.
                if(bufferImageGranularity > 1)
                {
                    while(index1st < suballocations1st.size())
                    {
                        const VmaSuballocation& suballoc = suballocations1st[index1st];
                        if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
                        {
                            if(suballoc.hAllocation != VK_NULL_HANDLE)
                            {
                                // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
                                if(suballoc.hAllocation->CanBecomeLost() &&
                                    suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                                {
                                    ++pAllocationRequest->itemsToMakeLostCount;
                                    pAllocationRequest->sumItemSize += suballoc.size;
                                }
                                else
                                {
                                    return false;
                                }
                            }
                        }
                        else
                        {
                            // Already on next page.
                            break;
                        }
                        ++index1st;
                    }
                }
            }

            // There is enough free space at the end after alignment.
            if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN < size) ||
                (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
            {
                // Check next suballocations for BufferImageGranularity conflicts.
                // If conflict exists, allocation cannot be made here.
                if(bufferImageGranularity > 1)
                {
                    for(size_t nextSuballocIndex = index1st;
                        nextSuballocIndex < suballocations1st.size();
                        nextSuballocIndex++)
                    {
                        const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
                        if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                        {
                            if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                            {
                                return false;
                            }
                        }
                        else
                        {
                            // Already on next page.
                            break;
                        }
                    }
                }

                // All tests passed: Success.
                pAllocationRequest->offset = resultOffset;
                pAllocationRequest->sumFreeSize =
                    (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
                    - resultBaseOffset
                    - pAllocationRequest->sumItemSize;
                // pAllocationRequest->item unused.
                return true;
            }
        }
    }

    return false;
}
    8964 
/*
Makes lost the pAllocationRequest->itemsToMakeLostCount allocations that were
counted as colliding by CreateAllocationRequest(), turning them into null
items and reclaiming their size as free space.
Returns false if any of them refuses to become lost.
*/
bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    if(pAllocationRequest->itemsToMakeLostCount == 0)
    {
        return true;
    }

    // Lost allocations are only produced by the ring-buffer wrap-around path,
    // never in double-stack mode.
    VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);

    // Walk the 1st vector from the first non-null item until the requested
    // number of allocations has been made lost.
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    size_t index1st = m_1stNullItemsBeginCount;
    size_t madeLostCount = 0;
    while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
    {
        VMA_ASSERT(index1st < suballocations1st.size());
        VmaSuballocation& suballoc = suballocations1st[index1st];
        if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
            VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
            if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
            {
                // Turn the item into a null item; its size becomes free space.
                suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                suballoc.hAllocation = VK_NULL_HANDLE;
                m_SumFreeSize += suballoc.size;
                ++m_1stNullItemsMiddleCount;
                ++madeLostCount;
            }
            else
            {
                return false;
            }
        }
        ++index1st;
    }

    CleanupAfterFree();
    //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().

    return true;
}
    9009 
    9010 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    9011 {
    9012  uint32_t lostAllocationCount = 0;
    9013 
    9014  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9015  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    9016  {
    9017  VmaSuballocation& suballoc = suballocations1st[i];
    9018  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    9019  suballoc.hAllocation->CanBecomeLost() &&
    9020  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    9021  {
    9022  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    9023  suballoc.hAllocation = VK_NULL_HANDLE;
    9024  ++m_1stNullItemsMiddleCount;
    9025  m_SumFreeSize += suballoc.size;
    9026  ++lostAllocationCount;
    9027  }
    9028  }
    9029 
    9030  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9031  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    9032  {
    9033  VmaSuballocation& suballoc = suballocations2nd[i];
    9034  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    9035  suballoc.hAllocation->CanBecomeLost() &&
    9036  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    9037  {
    9038  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    9039  suballoc.hAllocation = VK_NULL_HANDLE;
    9040  ++m_2ndNullItemsCount;
    9041  ++lostAllocationCount;
    9042  }
    9043  }
    9044 
    9045  if(lostAllocationCount)
    9046  {
    9047  CleanupAfterFree();
    9048  }
    9049 
    9050  return lostAllocationCount;
    9051 }
    9052 
    9053 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
    9054 {
    9055  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9056  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    9057  {
    9058  const VmaSuballocation& suballoc = suballocations1st[i];
    9059  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    9060  {
    9061  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    9062  {
    9063  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    9064  return VK_ERROR_VALIDATION_FAILED_EXT;
    9065  }
    9066  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    9067  {
    9068  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    9069  return VK_ERROR_VALIDATION_FAILED_EXT;
    9070  }
    9071  }
    9072  }
    9073 
    9074  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9075  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    9076  {
    9077  const VmaSuballocation& suballoc = suballocations2nd[i];
    9078  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    9079  {
    9080  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    9081  {
    9082  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    9083  return VK_ERROR_VALIDATION_FAILED_EXT;
    9084  }
    9085  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    9086  {
    9087  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    9088  return VK_ERROR_VALIDATION_FAILED_EXT;
    9089  }
    9090  }
    9091  }
    9092 
    9093  return VK_SUCCESS;
    9094 }
    9095 
/*
Commits a previously computed allocation request: inserts the new
suballocation into the appropriate vector, switching the 2nd-vector mode
(double stack / ring buffer) as needed, and updates m_SumFreeSize.
*/
void VmaBlockMetadata_Linear::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };

    if(upperAddress)
    {
        // Upper-address allocation goes to the 2nd vector and switches it into
        // double-stack mode.
        VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
            "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
        SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
        suballocations2nd.push_back(newSuballoc);
        m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
    }
    else
    {
        SuballocationVectorType& suballocations1st = AccessSuballocations1st();

        // First allocation.
        if(suballocations1st.empty())
        {
            suballocations1st.push_back(newSuballoc);
        }
        else
        {
            // New allocation at the end of 1st vector.
            if(request.offset >= suballocations1st.back().offset + suballocations1st.back().size)
            {
                // Check if it fits before the end of the block.
                VMA_ASSERT(request.offset + allocSize <= GetSize());
                suballocations1st.push_back(newSuballoc);
            }
            // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
            else if(request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset)
            {
                SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

                switch(m_2ndVectorMode)
                {
                case SECOND_VECTOR_EMPTY:
                    // First allocation from second part ring buffer.
                    VMA_ASSERT(suballocations2nd.empty());
                    m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
                    break;
                case SECOND_VECTOR_RING_BUFFER:
                    // 2-part ring buffer is already started.
                    VMA_ASSERT(!suballocations2nd.empty());
                    break;
                case SECOND_VECTOR_DOUBLE_STACK:
                    VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
                    break;
                default:
                    VMA_ASSERT(0);
                }

                suballocations2nd.push_back(newSuballoc);
            }
            else
            {
                // Request does not match either insertion point - the request
                // was not produced by CreateAllocationRequest on this state.
                VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
            }
        }
    }

    m_SumFreeSize -= newSuballoc.size;
}
    9165 
    9166 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
    9167 {
    9168  FreeAtOffset(allocation->GetOffset());
    9169 }
    9170 
/*
Frees the suballocation that starts at the given offset. Cheap cases are
checked first (first item of 1st vector, last item of either vector); middle
items are located with VmaVectorFindSorted and only marked as null items -
their physical removal is deferred to CleanupAfterFree().
Asserts if no suballocation with this offset exists.
*/
void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(!suballocations1st.empty())
    {
        // First allocation: Mark it as next empty at the beginning.
        VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
        if(firstSuballoc.offset == offset)
        {
            firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
            firstSuballoc.hAllocation = VK_NULL_HANDLE;
            m_SumFreeSize += firstSuballoc.size;
            ++m_1stNullItemsBeginCount;
            CleanupAfterFree();
            return;
        }
    }

    // Last allocation in 2-part ring buffer or top of upper stack (same logic).
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        VmaSuballocation& lastSuballoc = suballocations2nd.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations2nd.pop_back();
            CleanupAfterFree();
            return;
        }
    }
    // Last allocation in 1st vector.
    else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
    {
        VmaSuballocation& lastSuballoc = suballocations1st.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations1st.pop_back();
            CleanupAfterFree();
            return;
        }
    }

    // Item from the middle of 1st vector.
    {
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
            suballocations1st.begin() + m_1stNullItemsBeginCount,
            suballocations1st.end(),
            refSuballoc);
        if(it != suballocations1st.end())
        {
            // Mark as null item; removal is deferred to CleanupAfterFree().
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_1stNullItemsMiddleCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
    {
        // Item from the middle of 2nd vector.
        // Ring buffer is searched by ascending offset, double stack by descending.
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
            VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
            VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
        if(it != suballocations2nd.end())
        {
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_2ndNullItemsCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
}
    9259 
    9260 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
    9261 {
    9262  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
    9263  const size_t suballocCount = AccessSuballocations1st().size();
    9264  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
    9265 }
    9266 
/*
Restores metadata invariants after suballocations were freed or made lost:
absorbs runs of null items into the begin/end counters, physically removes
trailing null items, optionally compacts the 1st vector, and swaps the
vectors when the 1st one empties in ring-buffer mode.
*/
void VmaBlockMetadata_Linear::CleanupAfterFree()
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(IsEmpty())
    {
        // Everything freed: reset to the initial empty state.
        suballocations1st.clear();
        suballocations2nd.clear();
        m_1stNullItemsBeginCount = 0;
        m_1stNullItemsMiddleCount = 0;
        m_2ndNullItemsCount = 0;
        m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    }
    else
    {
        const size_t suballoc1stCount = suballocations1st.size();
        const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
        VMA_ASSERT(nullItem1stCount <= suballoc1stCount);

        // Find more null items at the beginning of 1st vector.
        while(m_1stNullItemsBeginCount < suballoc1stCount &&
            suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
        {
            ++m_1stNullItemsBeginCount;
            --m_1stNullItemsMiddleCount;
        }

        // Find more null items at the end of 1st vector.
        while(m_1stNullItemsMiddleCount > 0 &&
            suballocations1st.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_1stNullItemsMiddleCount;
            suballocations1st.pop_back();
        }

        // Find more null items at the end of 2nd vector.
        while(m_2ndNullItemsCount > 0 &&
            suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_2ndNullItemsCount;
            suballocations2nd.pop_back();
        }

        if(ShouldCompact1st())
        {
            // Compact 1st vector: move non-null items to the front, drop the rest.
            const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
            size_t srcIndex = m_1stNullItemsBeginCount;
            for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
            {
                while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
                {
                    ++srcIndex;
                }
                if(dstIndex != srcIndex)
                {
                    suballocations1st[dstIndex] = suballocations1st[srcIndex];
                }
                ++srcIndex;
            }
            suballocations1st.resize(nonNullItemCount);
            m_1stNullItemsBeginCount = 0;
            m_1stNullItemsMiddleCount = 0;
        }

        // 2nd vector became empty.
        if(suballocations2nd.empty())
        {
            m_2ndVectorMode = SECOND_VECTOR_EMPTY;
        }

        // 1st vector became empty.
        if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
        {
            suballocations1st.clear();
            m_1stNullItemsBeginCount = 0;

            if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
            {
                // Swap 1st with 2nd. Now 2nd is empty.
                m_2ndVectorMode = SECOND_VECTOR_EMPTY;
                m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
                // Absorb leading null items of the new 1st vector into the begin counter.
                while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
                    suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
                {
                    ++m_1stNullItemsBeginCount;
                    --m_1stNullItemsMiddleCount;
                }
                m_2ndNullItemsCount = 0;
                // Flip the index so AccessSuballocations1st() now returns the old 2nd vector.
                m_1stVectorIndex ^= 1;
            }
        }
    }

    VMA_HEAVY_ASSERT(Validate());
}
    9363 
    9364 
    9366 // class VmaBlockMetadata_Buddy
    9367 
// Constructs an empty buddy-allocator metadata object. The tree root and
// free lists are only populated later, in Init().
VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_Root(VMA_NULL),
    m_AllocationCount(0),
    m_FreeCount(1),
    m_SumFreeSize(0)
{
    // Zero all per-level free-list heads/tails.
    memset(m_FreeList, 0, sizeof(m_FreeList));
}
    9377 
// Releases the whole node tree starting at the root.
// NOTE(review): assumes DeleteNode tolerates a VMA_NULL root (never Init-ed) - confirm.
VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
{
    DeleteNode(m_Root);
}
    9382 
/*
Initializes the buddy metadata for a block of the given size. Only the
largest power-of-2 portion of the block (m_UsableSize) is managed by the
buddy algorithm; the remainder is accounted for elsewhere via GetUnusableSize().
*/
void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);

    m_UsableSize = VmaPrevPow2(size);
    m_SumFreeSize = m_UsableSize;

    // Calculate m_LevelCount: each level halves the node size, stopping at
    // MIN_NODE_SIZE or the MAX_LEVELS cap.
    m_LevelCount = 1;
    while(m_LevelCount < MAX_LEVELS &&
        LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
    {
        ++m_LevelCount;
    }

    // Create the root node covering the whole usable size and register it on
    // the level-0 free list.
    Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
    rootNode->offset = 0;
    rootNode->type = Node::TYPE_FREE;
    rootNode->parent = VMA_NULL;
    rootNode->buddy = VMA_NULL;

    m_Root = rootNode;
    AddToFreeListFront(0, rootNode);
}
    9407 
/*
Consistency check for the buddy metadata: validates the node tree against the
stored counters, then checks every free list for proper doubly-linked
structure and for emptiness beyond m_LevelCount.
Returns false (via VMA_VALIDATE) on the first inconsistency.
*/
bool VmaBlockMetadata_Buddy::Validate() const
{
    // Validate tree.
    ValidationContext ctx;
    if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
    {
        VMA_VALIDATE(false && "ValidateNode failed.");
    }
    VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
    VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);

    // Validate free node lists: each list must be consistently linked and
    // contain only TYPE_FREE nodes.
    for(uint32_t level = 0; level < m_LevelCount; ++level)
    {
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
            m_FreeList[level].front->free.prev == VMA_NULL);

        for(Node* node = m_FreeList[level].front;
            node != VMA_NULL;
            node = node->free.next)
        {
            VMA_VALIDATE(node->type == Node::TYPE_FREE);

            if(node->free.next == VMA_NULL)
            {
                VMA_VALIDATE(m_FreeList[level].back == node);
            }
            else
            {
                VMA_VALIDATE(node->free.next->free.prev == node);
            }
        }
    }

    // Validate that free lists at higher levels are empty.
    for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
    {
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
    }

    return true;
}
    9450 
    9451 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
    9452 {
    9453  for(uint32_t level = 0; level < m_LevelCount; ++level)
    9454  {
    9455  if(m_FreeList[level].front != VMA_NULL)
    9456  {
    9457  return LevelToNodeSize(level);
    9458  }
    9459  }
    9460  return 0;
    9461 }
    9462 
// Fills outInfo with detailed statistics for this block, counting the
// "unusable" tail (the part of the block beyond the power-of-2 usable region)
// as one extra unused range.
void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
{
    const VkDeviceSize unusableSize = GetUnusableSize();

    outInfo.blockCount = 1;

    // Reset accumulators; mins are seeded with UINT64_MAX so that the first
    // observed value always becomes the minimum.
    outInfo.allocationCount = outInfo.unusedRangeCount = 0;
    outInfo.usedBytes = outInfo.unusedBytes = 0;

    outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
    outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
    outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.

    // Accumulate over the whole tree recursively.
    CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));

    if(unusableSize > 0)
    {
        ++outInfo.unusedRangeCount;
        outInfo.unusedBytes += unusableSize;
        outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
        outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
    }
}
    9486 
// Accumulates this block's statistics into inoutStats (pool-level summary).
void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
{
    const VkDeviceSize unusableSize = GetUnusableSize();

    inoutStats.size += GetSize();
    // The unusable tail counts as unused bytes even though it can never
    // be allocated from.
    inoutStats.unusedSize += m_SumFreeSize + unusableSize;
    inoutStats.allocationCount += m_AllocationCount;
    inoutStats.unusedRangeCount += m_FreeCount;
    inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());

    if(unusableSize > 0)
    {
        ++inoutStats.unusedRangeCount;
        // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
    }
}
    9503 
    9504 #if VMA_STATS_STRING_ENABLED
    9505 
// Writes a JSON description of this block's layout (allocations and
// unused ranges) using the shared PrintDetailedMap_* helpers.
void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
{
    // TODO optimize
    // Full stats are computed up front because the JSON header needs totals.
    VmaStatInfo stat;
    CalcAllocationStatInfo(stat);

    PrintDetailedMap_Begin(
        json,
        stat.unusedBytes,
        stat.allocationCount,
        stat.unusedRangeCount);

    // Dump the tree recursively.
    PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));

    // The unusable tail is reported as a trailing unused range.
    const VkDeviceSize unusableSize = GetUnusableSize();
    if(unusableSize > 0)
    {
        PrintDetailedMap_UnusedRange(json,
            m_UsableSize, // offset
            unusableSize); // size
    }

    PrintDetailedMap_End(json);
}
    9530 
    9531 #endif // #if VMA_STATS_STRING_ENABLED
    9532 
// Tries to find a place for an allocation of given size and alignment.
// On success fills *pAllocationRequest (customData carries the chosen level)
// and returns true. Lost allocations are not supported by this algorithm,
// so itemsToMakeLostCount is always 0.
bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    bool upperAddress,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");

    // Simple way to respect bufferImageGranularity. May be optimized some day.
    // Whenever it might be an OPTIMAL image...
    if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
        allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
        allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
    {
        allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
        allocSize = VMA_MAX(allocSize, bufferImageGranularity);
    }

    // Larger than the whole usable region can never succeed.
    if(allocSize > m_UsableSize)
    {
        return false;
    }

    // Search free lists from the best-fit level (smallest node that still
    // fits) up towards the root; `level--` visits targetLevel, ..., 0.
    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    for(uint32_t level = targetLevel + 1; level--; )
    {
        for(Node* freeNode = m_FreeList[level].front;
            freeNode != VMA_NULL;
            freeNode = freeNode->free.next)
        {
            // Buddy node offsets are multiples of their node size; only a
            // stricter explicit alignment can reject a candidate.
            if(freeNode->offset % allocAlignment == 0)
            {
                pAllocationRequest->offset = freeNode->offset;
                pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
                pAllocationRequest->sumItemSize = 0;
                pAllocationRequest->itemsToMakeLostCount = 0;
                // Level is smuggled to Alloc() through the opaque customData.
                pAllocationRequest->customData = (void*)(uintptr_t)level;
                return true;
            }
        }
    }

    return false;
}
    9583 
    9584 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
    9585  uint32_t currentFrameIndex,
    9586  uint32_t frameInUseCount,
    9587  VmaAllocationRequest* pAllocationRequest)
    9588 {
    9589  /*
    9590  Lost allocations are not supported in buddy allocator at the moment.
    9591  Support might be added in the future.
    9592  */
    9593  return pAllocationRequest->itemsToMakeLostCount == 0;
    9594 }
    9595 
    9596 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    9597 {
    9598  /*
    9599  Lost allocations are not supported in buddy allocator at the moment.
    9600  Support might be added in the future.
    9601  */
    9602  return 0;
    9603 }
    9604 
// Commits an allocation previously found by CreateAllocationRequest:
// walks to the chosen free node, splits it down to the target level,
// and converts the final node into an allocation node.
void VmaBlockMetadata_Buddy::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    // Level chosen by CreateAllocationRequest was carried via customData.
    uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;

    // Find the free node at currLevel with the requested offset.
    Node* currNode = m_FreeList[currLevel].front;
    VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    while(currNode->offset != request.offset)
    {
        currNode = currNode->free.next;
        VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    }

    // Go down, splitting free nodes.
    while(currLevel < targetLevel)
    {
        // currNode is already first free node at currLevel.
        // Remove it from list of free nodes at this currLevel.
        RemoveFromFreeList(currLevel, currNode);

        const uint32_t childrenLevel = currLevel + 1;

        // Create two free sub-nodes.
        Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
        Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();

        leftChild->offset = currNode->offset;
        leftChild->type = Node::TYPE_FREE;
        leftChild->parent = currNode;
        leftChild->buddy = rightChild;

        rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
        rightChild->type = Node::TYPE_FREE;
        rightChild->parent = currNode;
        rightChild->buddy = leftChild;

        // Convert current currNode to split type.
        currNode->type = Node::TYPE_SPLIT;
        currNode->split.leftChild = leftChild;

        // Add child nodes to free list. Order is important!
        // Left child must end up at the front so the loop can keep taking it.
        AddToFreeListFront(childrenLevel, rightChild);
        AddToFreeListFront(childrenLevel, leftChild);

        // Net effect of one split: one free node becomes two.
        ++m_FreeCount;
        //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
        ++currLevel;
        currNode = m_FreeList[currLevel].front;

        /*
        We can be sure that currNode, as left child of node previously split,
        also fullfills the alignment requirement.
        */
    }

    // Remove from free list.
    VMA_ASSERT(currLevel == targetLevel &&
        currNode != VMA_NULL &&
        currNode->type == Node::TYPE_FREE);
    RemoveFromFreeList(currLevel, currNode);

    // Convert to allocation node.
    currNode->type = Node::TYPE_ALLOCATION;
    currNode->allocation.alloc = hAllocation;

    ++m_AllocationCount;
    --m_FreeCount;
    m_SumFreeSize -= allocSize;
}
    9679 
    9680 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
    9681 {
    9682  if(node->type == Node::TYPE_SPLIT)
    9683  {
    9684  DeleteNode(node->split.leftChild->buddy);
    9685  DeleteNode(node->split.leftChild);
    9686  }
    9687 
    9688  vma_delete(GetAllocationCallbacks(), node);
    9689 }
    9690 
    9691 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
    9692 {
    9693  VMA_VALIDATE(level < m_LevelCount);
    9694  VMA_VALIDATE(curr->parent == parent);
    9695  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
    9696  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
    9697  switch(curr->type)
    9698  {
    9699  case Node::TYPE_FREE:
    9700  // curr->free.prev, next are validated separately.
    9701  ctx.calculatedSumFreeSize += levelNodeSize;
    9702  ++ctx.calculatedFreeCount;
    9703  break;
    9704  case Node::TYPE_ALLOCATION:
    9705  ++ctx.calculatedAllocationCount;
    9706  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
    9707  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
    9708  break;
    9709  case Node::TYPE_SPLIT:
    9710  {
    9711  const uint32_t childrenLevel = level + 1;
    9712  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
    9713  const Node* const leftChild = curr->split.leftChild;
    9714  VMA_VALIDATE(leftChild != VMA_NULL);
    9715  VMA_VALIDATE(leftChild->offset == curr->offset);
    9716  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
    9717  {
    9718  VMA_VALIDATE(false && "ValidateNode for left child failed.");
    9719  }
    9720  const Node* const rightChild = leftChild->buddy;
    9721  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
    9722  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
    9723  {
    9724  VMA_VALIDATE(false && "ValidateNode for right child failed.");
    9725  }
    9726  }
    9727  break;
    9728  default:
    9729  return false;
    9730  }
    9731 
    9732  return true;
    9733 }
    9734 
    9735 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
    9736 {
    9737  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
    9738  uint32_t level = 0;
    9739  VkDeviceSize currLevelNodeSize = m_UsableSize;
    9740  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
    9741  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
    9742  {
    9743  ++level;
    9744  currLevelNodeSize = nextLevelNodeSize;
    9745  nextLevelNodeSize = currLevelNodeSize >> 1;
    9746  }
    9747  return level;
    9748 }
    9749 
    9750 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
    9751 {
    9752  // Find node and level.
    9753  Node* node = m_Root;
    9754  VkDeviceSize nodeOffset = 0;
    9755  uint32_t level = 0;
    9756  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
    9757  while(node->type == Node::TYPE_SPLIT)
    9758  {
    9759  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
    9760  if(offset < nodeOffset + nextLevelSize)
    9761  {
    9762  node = node->split.leftChild;
    9763  }
    9764  else
    9765  {
    9766  node = node->split.leftChild->buddy;
    9767  nodeOffset += nextLevelSize;
    9768  }
    9769  ++level;
    9770  levelNodeSize = nextLevelSize;
    9771  }
    9772 
    9773  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
    9774  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
    9775 
    9776  ++m_FreeCount;
    9777  --m_AllocationCount;
    9778  m_SumFreeSize += alloc->GetSize();
    9779 
    9780  node->type = Node::TYPE_FREE;
    9781 
    9782  // Join free nodes if possible.
    9783  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
    9784  {
    9785  RemoveFromFreeList(level, node->buddy);
    9786  Node* const parent = node->parent;
    9787 
    9788  vma_delete(GetAllocationCallbacks(), node->buddy);
    9789  vma_delete(GetAllocationCallbacks(), node);
    9790  parent->type = Node::TYPE_FREE;
    9791 
    9792  node = parent;
    9793  --level;
    9794  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
    9795  --m_FreeCount;
    9796  }
    9797 
    9798  AddToFreeListFront(level, node);
    9799 }
    9800 
    9801 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
    9802 {
    9803  switch(node->type)
    9804  {
    9805  case Node::TYPE_FREE:
    9806  ++outInfo.unusedRangeCount;
    9807  outInfo.unusedBytes += levelNodeSize;
    9808  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
    9809  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, levelNodeSize);
    9810  break;
    9811  case Node::TYPE_ALLOCATION:
    9812  {
    9813  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
    9814  ++outInfo.allocationCount;
    9815  outInfo.usedBytes += allocSize;
    9816  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
    9817  outInfo.allocationSizeMin = VMA_MAX(outInfo.allocationSizeMin, allocSize);
    9818 
    9819  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
    9820  if(unusedRangeSize > 0)
    9821  {
    9822  ++outInfo.unusedRangeCount;
    9823  outInfo.unusedBytes += unusedRangeSize;
    9824  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9825  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9826  }
    9827  }
    9828  break;
    9829  case Node::TYPE_SPLIT:
    9830  {
    9831  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
    9832  const Node* const leftChild = node->split.leftChild;
    9833  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
    9834  const Node* const rightChild = leftChild->buddy;
    9835  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
    9836  }
    9837  break;
    9838  default:
    9839  VMA_ASSERT(0);
    9840  }
    9841 }
    9842 
// Pushes a free node at the front of the doubly-linked free list for `level`.
void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
{
    VMA_ASSERT(node->type == Node::TYPE_FREE);

    // List is empty.
    Node* const frontNode = m_FreeList[level].front;
    if(frontNode == VMA_NULL)
    {
        VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
        node->free.prev = node->free.next = VMA_NULL;
        m_FreeList[level].front = m_FreeList[level].back = node;
    }
    else
    {
        // Link in front of the current head.
        VMA_ASSERT(frontNode->free.prev == VMA_NULL);
        node->free.prev = VMA_NULL;
        node->free.next = frontNode;
        frontNode->free.prev = node;
        m_FreeList[level].front = node;
    }
}
    9864 
// Unlinks a node from the doubly-linked free list for `level`,
// updating the cached head/tail pointers as needed.
void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
{
    VMA_ASSERT(m_FreeList[level].front != VMA_NULL);

    // It is at the front.
    if(node->free.prev == VMA_NULL)
    {
        VMA_ASSERT(m_FreeList[level].front == node);
        m_FreeList[level].front = node->free.next;
    }
    else
    {
        Node* const prevFreeNode = node->free.prev;
        VMA_ASSERT(prevFreeNode->free.next == node);
        prevFreeNode->free.next = node->free.next;
    }

    // It is at the back.
    if(node->free.next == VMA_NULL)
    {
        VMA_ASSERT(m_FreeList[level].back == node);
        m_FreeList[level].back = node->free.prev;
    }
    else
    {
        Node* const nextFreeNode = node->free.next;
        VMA_ASSERT(nextFreeNode->free.prev == node);
        nextFreeNode->free.prev = node->free.prev;
    }
}
    9895 
    9896 #if VMA_STATS_STRING_ENABLED
// Recursively writes one node (and its subtree) into the JSON detailed map.
void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
{
    switch(node->type)
    {
    case Node::TYPE_FREE:
        PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
        break;
    case Node::TYPE_ALLOCATION:
        {
            PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
            // Internal fragmentation (allocation smaller than its node)
            // is reported as an unused range right after the allocation.
            const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
            if(allocSize < levelNodeSize)
            {
                PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
            }
        }
        break;
    case Node::TYPE_SPLIT:
        {
            const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
            const Node* const leftChild = node->split.leftChild;
            PrintDetailedMapNode(json, leftChild, childrenNodeSize);
            const Node* const rightChild = leftChild->buddy;
            PrintDetailedMapNode(json, rightChild, childrenNodeSize);
        }
        break;
    default:
        VMA_ASSERT(0);
    }
}
    9927 #endif // #if VMA_STATS_STRING_ENABLED
    9928 
    9929 
    9931 // class VmaDeviceMemoryBlock
    9932 
// Constructs an empty, uninitialized block; actual setup happens in Init().
VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    m_pMetadata(VMA_NULL),
    m_MemoryTypeIndex(UINT32_MAX),
    m_Id(0),
    m_hMemory(VK_NULL_HANDLE),
    m_MapCount(0),
    m_pMappedData(VMA_NULL)
{
}
    9942 
    9943 void VmaDeviceMemoryBlock::Init(
    9944  VmaAllocator hAllocator,
    9945  uint32_t newMemoryTypeIndex,
    9946  VkDeviceMemory newMemory,
    9947  VkDeviceSize newSize,
    9948  uint32_t id,
    9949  uint32_t algorithm)
    9950 {
    9951  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    9952 
    9953  m_MemoryTypeIndex = newMemoryTypeIndex;
    9954  m_Id = id;
    9955  m_hMemory = newMemory;
    9956 
    9957  switch(algorithm)
    9958  {
    9960  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
    9961  break;
    9963  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
    9964  break;
    9965  default:
    9966  VMA_ASSERT(0);
    9967  // Fall-through.
    9968  case 0:
    9969  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
    9970  }
    9971  m_pMetadata->Init(newSize);
    9972 }
    9973 
// Releases the block's Vulkan memory and metadata.
// Must only be called when all allocations inside have been freed.
void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
{
    // This is the most important assert in the entire library.
    // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
    VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");

    VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
    m_hMemory = VK_NULL_HANDLE;

    vma_delete(allocator, m_pMetadata);
    m_pMetadata = VMA_NULL;
}
    9987 
    9988 bool VmaDeviceMemoryBlock::Validate() const
    9989 {
    9990  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
    9991  (m_pMetadata->GetSize() != 0));
    9992 
    9993  return m_pMetadata->Validate();
    9994 }
    9995 
    9996 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
    9997 {
    9998  void* pData = nullptr;
    9999  VkResult res = Map(hAllocator, 1, &pData);
    10000  if(res != VK_SUCCESS)
    10001  {
    10002  return res;
    10003  }
    10004 
    10005  res = m_pMetadata->CheckCorruption(pData);
    10006 
    10007  Unmap(hAllocator, 1);
    10008 
    10009  return res;
    10010 }
    10011 
// Maps the block's memory, reference-counted: only the first call invokes
// vkMapMemory (mapping the whole object); later calls just bump m_MapCount.
// On success *ppData (if not null) receives the base mapped pointer.
// count == 0 is a no-op.
VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
{
    if(count == 0)
    {
        return VK_SUCCESS;
    }

    // Lock prevents concurrent vkMapMemory/vkUnmapMemory on the same VkDeviceMemory.
    VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    if(m_MapCount != 0)
    {
        // Already mapped - just add references.
        m_MapCount += count;
        VMA_ASSERT(m_pMappedData != VMA_NULL);
        if(ppData != VMA_NULL)
        {
            *ppData = m_pMappedData;
        }
        return VK_SUCCESS;
    }
    else
    {
        // First mapping - map the entire memory object.
        VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
            hAllocator->m_hDevice,
            m_hMemory,
            0, // offset
            VK_WHOLE_SIZE,
            0, // flags
            &m_pMappedData);
        if(result == VK_SUCCESS)
        {
            if(ppData != VMA_NULL)
            {
                *ppData = m_pMappedData;
            }
            m_MapCount = count;
        }
        return result;
    }
}
    10050 
// Releases `count` mapping references; vkUnmapMemory is called only when
// the reference count drops to zero. count == 0 is a no-op.
void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
{
    if(count == 0)
    {
        return;
    }

    VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    if(m_MapCount >= count)
    {
        m_MapCount -= count;
        if(m_MapCount == 0)
        {
            // Clear the pointer before unmapping so no stale pointer survives.
            m_pMappedData = VMA_NULL;
            (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
        }
    }
    else
    {
        VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
    }
}
    10073 
// Writes the corruption-detection magic value into the debug margins
// immediately before and after the given allocation. Requires
// VMA_DEBUG_MARGIN and VMA_DEBUG_DETECT_CORRUPTION to be enabled.
VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
{
    VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);

    void* pData;
    VkResult res = Map(hAllocator, 1, &pData);
    if(res != VK_SUCCESS)
    {
        return res;
    }

    // Margin before the allocation and margin right after it.
    VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
    VmaWriteMagicValue(pData, allocOffset + allocSize);

    Unmap(hAllocator, 1);

    return VK_SUCCESS;
}
    10093 
// Verifies the corruption-detection magic values in the debug margins around
// a freed allocation; asserts (in debug builds) if either margin was
// overwritten. Returns the Map() failure code if mapping fails.
VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
{
    VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);

    void* pData;
    VkResult res = Map(hAllocator, 1, &pData);
    if(res != VK_SUCCESS)
    {
        return res;
    }

    if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
    {
        VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
    }
    else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
    {
        VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
    }

    Unmap(hAllocator, 1);

    return VK_SUCCESS;
}
    10119 
    10120 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
    10121  const VmaAllocator hAllocator,
    10122  const VmaAllocation hAllocation,
    10123  VkBuffer hBuffer)
    10124 {
    10125  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
    10126  hAllocation->GetBlock() == this);
    10127  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    10128  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    10129  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
    10130  hAllocator->m_hDevice,
    10131  hBuffer,
    10132  m_hMemory,
    10133  hAllocation->GetOffset());
    10134 }
    10135 
    10136 VkResult VmaDeviceMemoryBlock::BindImageMemory(
    10137  const VmaAllocator hAllocator,
    10138  const VmaAllocation hAllocation,
    10139  VkImage hImage)
    10140 {
    10141  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
    10142  hAllocation->GetBlock() == this);
    10143  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    10144  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    10145  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
    10146  hAllocator->m_hDevice,
    10147  hImage,
    10148  m_hMemory,
    10149  hAllocation->GetOffset());
    10150 }
    10151 
// Resets a VmaStatInfo: zero all counters, then seed the minimum trackers
// with UINT64_MAX so any observed value becomes the new minimum.
static void InitStatInfo(VmaStatInfo& outInfo)
{
    memset(&outInfo, 0, sizeof(outInfo));
    outInfo.allocationSizeMin = UINT64_MAX;
    outInfo.unusedRangeSizeMin = UINT64_MAX;
}
    10158 
// Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
// Counters and byte totals accumulate; extremes combine via min/max.
// Averages are not touched here - see VmaPostprocessCalcStatInfo.
static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
{
    inoutInfo.blockCount += srcInfo.blockCount;
    inoutInfo.allocationCount += srcInfo.allocationCount;
    inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
    inoutInfo.usedBytes += srcInfo.usedBytes;
    inoutInfo.unusedBytes += srcInfo.unusedBytes;
    inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
    inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
    inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
    inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
}
    10172 
    10173 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
    10174 {
    10175  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
    10176  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
    10177  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
    10178  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
    10179 }
    10180 
// Creates the pool's block vector from the creation parameters.
// createInfo.blockSize == 0 means "use the allocator's preferred block size"
// and marks the block size as non-explicit.
VmaPool_T::VmaPool_T(
    VmaAllocator hAllocator,
    const VmaPoolCreateInfo& createInfo,
    VkDeviceSize preferredBlockSize) :
    m_BlockVector(
        hAllocator,
        createInfo.memoryTypeIndex,
        createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
        createInfo.minBlockCount,
        createInfo.maxBlockCount,
        (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
        createInfo.frameInUseCount,
        true, // isCustomPool
        createInfo.blockSize != 0, // explicitBlockSize
        createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
    m_Id(0)
{
}
    10199 
// Nothing to do explicitly - the member block vector cleans itself up.
VmaPool_T::~VmaPool_T()
{
}
    10203 
    10204 #if VMA_STATS_STRING_ENABLED
    10205 
    10206 #endif // #if VMA_STATS_STRING_ENABLED
    10207 
// Stores the configuration for a vector of memory blocks of one memory type.
// Blocks themselves are created lazily (or via CreateMinBlocks).
VmaBlockVector::VmaBlockVector(
    VmaAllocator hAllocator,
    uint32_t memoryTypeIndex,
    VkDeviceSize preferredBlockSize,
    size_t minBlockCount,
    size_t maxBlockCount,
    VkDeviceSize bufferImageGranularity,
    uint32_t frameInUseCount,
    bool isCustomPool,
    bool explicitBlockSize,
    uint32_t algorithm) :
    m_hAllocator(hAllocator),
    m_MemoryTypeIndex(memoryTypeIndex),
    m_PreferredBlockSize(preferredBlockSize),
    m_MinBlockCount(minBlockCount),
    m_MaxBlockCount(maxBlockCount),
    m_BufferImageGranularity(bufferImageGranularity),
    m_FrameInUseCount(frameInUseCount),
    m_IsCustomPool(isCustomPool),
    m_ExplicitBlockSize(explicitBlockSize),
    m_Algorithm(algorithm),
    m_HasEmptyBlock(false),
    m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    m_pDefragmentator(VMA_NULL),
    m_NextBlockId(0)
{
}
    10235 
    10236 VmaBlockVector::~VmaBlockVector()
    10237 {
    10238  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
    10239 
    10240  for(size_t i = m_Blocks.size(); i--; )
    10241  {
    10242  m_Blocks[i]->Destroy(m_hAllocator);
    10243  vma_delete(m_hAllocator, m_Blocks[i]);
    10244  }
    10245 }
    10246 
    10247 VkResult VmaBlockVector::CreateMinBlocks()
    10248 {
    10249  for(size_t i = 0; i < m_MinBlockCount; ++i)
    10250  {
    10251  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
    10252  if(res != VK_SUCCESS)
    10253  {
    10254  return res;
    10255  }
    10256  }
    10257  return VK_SUCCESS;
    10258 }
    10259 
    10260 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
    10261 {
    10262  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10263 
    10264  const size_t blockCount = m_Blocks.size();
    10265 
    10266  pStats->size = 0;
    10267  pStats->unusedSize = 0;
    10268  pStats->allocationCount = 0;
    10269  pStats->unusedRangeCount = 0;
    10270  pStats->unusedRangeSizeMax = 0;
    10271  pStats->blockCount = blockCount;
    10272 
    10273  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    10274  {
    10275  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    10276  VMA_ASSERT(pBlock);
    10277  VMA_HEAVY_ASSERT(pBlock->Validate());
    10278  pBlock->m_pMetadata->AddPoolStats(*pStats);
    10279  }
    10280 }
    10281 
    10282 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
    10283 {
    10284  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    10285  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
    10286  (VMA_DEBUG_MARGIN > 0) &&
    10287  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
    10288 }
    10289 
// Upper bound on allocation retry attempts - presumably used by the
// allocation loop(s) below this excerpt (e.g. when making other
// allocations lost); the consumer is not visible here - TODO confirm.
static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
    10291 
// Allocates allocationCount allocations of identical size/alignment into
// pAllocations. All-or-nothing: on failure, every allocation created so far
// is freed and the output array is zeroed. Returns the first failing
// VkResult, or VK_SUCCESS.
VkResult VmaBlockVector::Allocate(
    VmaPool hCurrentPool,
    uint32_t currentFrameIndex,
    VkDeviceSize size,
    VkDeviceSize alignment,
    const VmaAllocationCreateInfo& createInfo,
    VmaSuballocationType suballocType,
    size_t allocationCount,
    VmaAllocation* pAllocations)
{
    // Declared outside the scope below: also used by the cleanup loop.
    size_t allocIndex;
    VkResult res = VK_SUCCESS;

    {
        // Mutex is held only for the allocation loop, not the cleanup.
        VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
        for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
        {
            res = AllocatePage(
                hCurrentPool,
                currentFrameIndex,
                size,
                alignment,
                createInfo,
                suballocType,
                pAllocations + allocIndex);
            if(res != VK_SUCCESS)
            {
                break;
            }
        }
    }

    if(res != VK_SUCCESS)
    {
        // Free all already created allocations.
        while(allocIndex--)
        {
            Free(pAllocations[allocIndex]);
        }
        memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
    }

    return res;
}
    10336 
    10337 VkResult VmaBlockVector::AllocatePage(
    10338  VmaPool hCurrentPool,
    10339  uint32_t currentFrameIndex,
    10340  VkDeviceSize size,
    10341  VkDeviceSize alignment,
    10342  const VmaAllocationCreateInfo& createInfo,
    10343  VmaSuballocationType suballocType,
    10344  VmaAllocation* pAllocation)
    10345 {
    10346  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    10347  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
    10348  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    10349  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
    10350  const bool canCreateNewBlock =
    10351  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
    10352  (m_Blocks.size() < m_MaxBlockCount);
    10353  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
    10354 
    10355  // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer.
    10356  // Which in turn is available only when maxBlockCount = 1.
    10357  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
    10358  {
    10359  canMakeOtherLost = false;
    10360  }
    10361 
    10362  // Upper address can only be used with linear allocator and within single memory block.
    10363  if(isUpperAddress &&
    10364  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
    10365  {
    10366  return VK_ERROR_FEATURE_NOT_PRESENT;
    10367  }
    10368 
    10369  // Validate strategy.
    10370  switch(strategy)
    10371  {
    10372  case 0:
    10374  break;
    10378  break;
    10379  default:
    10380  return VK_ERROR_FEATURE_NOT_PRESENT;
    10381  }
    10382 
    10383  // Early reject: requested allocation size is larger that maximum block size for this block vector.
    10384  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
    10385  {
    10386  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    10387  }
    10388 
    10389  /*
    10390  Under certain condition, this whole section can be skipped for optimization, so
    10391  we move on directly to trying to allocate with canMakeOtherLost. That's the case
    10392  e.g. for custom pools with linear algorithm.
    10393  */
    10394  if(!canMakeOtherLost || canCreateNewBlock)
    10395  {
    10396  // 1. Search existing allocations. Try to allocate without making other allocations lost.
    10397  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
    10399 
    10400  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    10401  {
    10402  // Use only last block.
    10403  if(!m_Blocks.empty())
    10404  {
    10405  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
    10406  VMA_ASSERT(pCurrBlock);
    10407  VkResult res = AllocateFromBlock(
    10408  pCurrBlock,
    10409  hCurrentPool,
    10410  currentFrameIndex,
    10411  size,
    10412  alignment,
    10413  allocFlagsCopy,
    10414  createInfo.pUserData,
    10415  suballocType,
    10416  strategy,
    10417  pAllocation);
    10418  if(res == VK_SUCCESS)
    10419  {
    10420  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
    10421  return VK_SUCCESS;
    10422  }
    10423  }
    10424  }
    10425  else
    10426  {
    10428  {
    10429  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    10430  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    10431  {
    10432  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10433  VMA_ASSERT(pCurrBlock);
    10434  VkResult res = AllocateFromBlock(
    10435  pCurrBlock,
    10436  hCurrentPool,
    10437  currentFrameIndex,
    10438  size,
    10439  alignment,
    10440  allocFlagsCopy,
    10441  createInfo.pUserData,
    10442  suballocType,
    10443  strategy,
    10444  pAllocation);
    10445  if(res == VK_SUCCESS)
    10446  {
    10447  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
    10448  return VK_SUCCESS;
    10449  }
    10450  }
    10451  }
    10452  else // WORST_FIT, FIRST_FIT
    10453  {
    10454  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
    10455  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    10456  {
    10457  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10458  VMA_ASSERT(pCurrBlock);
    10459  VkResult res = AllocateFromBlock(
    10460  pCurrBlock,
    10461  hCurrentPool,
    10462  currentFrameIndex,
    10463  size,
    10464  alignment,
    10465  allocFlagsCopy,
    10466  createInfo.pUserData,
    10467  suballocType,
    10468  strategy,
    10469  pAllocation);
    10470  if(res == VK_SUCCESS)
    10471  {
    10472  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
    10473  return VK_SUCCESS;
    10474  }
    10475  }
    10476  }
    10477  }
    10478 
    10479  // 2. Try to create new block.
    10480  if(canCreateNewBlock)
    10481  {
    10482  // Calculate optimal size for new block.
    10483  VkDeviceSize newBlockSize = m_PreferredBlockSize;
    10484  uint32_t newBlockSizeShift = 0;
    10485  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
    10486 
    10487  if(!m_ExplicitBlockSize)
    10488  {
    10489  // Allocate 1/8, 1/4, 1/2 as first blocks.
    10490  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
    10491  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
    10492  {
    10493  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    10494  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
    10495  {
    10496  newBlockSize = smallerNewBlockSize;
    10497  ++newBlockSizeShift;
    10498  }
    10499  else
    10500  {
    10501  break;
    10502  }
    10503  }
    10504  }
    10505 
    10506  size_t newBlockIndex = 0;
    10507  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
    10508  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
    10509  if(!m_ExplicitBlockSize)
    10510  {
    10511  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
    10512  {
    10513  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    10514  if(smallerNewBlockSize >= size)
    10515  {
    10516  newBlockSize = smallerNewBlockSize;
    10517  ++newBlockSizeShift;
    10518  res = CreateBlock(newBlockSize, &newBlockIndex);
    10519  }
    10520  else
    10521  {
    10522  break;
    10523  }
    10524  }
    10525  }
    10526 
    10527  if(res == VK_SUCCESS)
    10528  {
    10529  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
    10530  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
    10531 
    10532  res = AllocateFromBlock(
    10533  pBlock,
    10534  hCurrentPool,
    10535  currentFrameIndex,
    10536  size,
    10537  alignment,
    10538  allocFlagsCopy,
    10539  createInfo.pUserData,
    10540  suballocType,
    10541  strategy,
    10542  pAllocation);
    10543  if(res == VK_SUCCESS)
    10544  {
    10545  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
    10546  return VK_SUCCESS;
    10547  }
    10548  else
    10549  {
    10550  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
    10551  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    10552  }
    10553  }
    10554  }
    10555  }
    10556 
    10557  // 3. Try to allocate from existing blocks with making other allocations lost.
    10558  if(canMakeOtherLost)
    10559  {
    10560  uint32_t tryIndex = 0;
    10561  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
    10562  {
    10563  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
    10564  VmaAllocationRequest bestRequest = {};
    10565  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
    10566 
    10567  // 1. Search existing allocations.
    10569  {
    10570  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    10571  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    10572  {
    10573  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10574  VMA_ASSERT(pCurrBlock);
    10575  VmaAllocationRequest currRequest = {};
    10576  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
    10577  currentFrameIndex,
    10578  m_FrameInUseCount,
    10579  m_BufferImageGranularity,
    10580  size,
    10581  alignment,
    10582  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
    10583  suballocType,
    10584  canMakeOtherLost,
    10585  strategy,
    10586  &currRequest))
    10587  {
    10588  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    10589  if(pBestRequestBlock == VMA_NULL ||
    10590  currRequestCost < bestRequestCost)
    10591  {
    10592  pBestRequestBlock = pCurrBlock;
    10593  bestRequest = currRequest;
    10594  bestRequestCost = currRequestCost;
    10595 
    10596  if(bestRequestCost == 0)
    10597  {
    10598  break;
    10599  }
    10600  }
    10601  }
    10602  }
    10603  }
    10604  else // WORST_FIT, FIRST_FIT
    10605  {
    10606  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
    10607  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    10608  {
    10609  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10610  VMA_ASSERT(pCurrBlock);
    10611  VmaAllocationRequest currRequest = {};
    10612  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
    10613  currentFrameIndex,
    10614  m_FrameInUseCount,
    10615  m_BufferImageGranularity,
    10616  size,
    10617  alignment,
    10618  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
    10619  suballocType,
    10620  canMakeOtherLost,
    10621  strategy,
    10622  &currRequest))
    10623  {
    10624  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    10625  if(pBestRequestBlock == VMA_NULL ||
    10626  currRequestCost < bestRequestCost ||
    10628  {
    10629  pBestRequestBlock = pCurrBlock;
    10630  bestRequest = currRequest;
    10631  bestRequestCost = currRequestCost;
    10632 
    10633  if(bestRequestCost == 0 ||
    10635  {
    10636  break;
    10637  }
    10638  }
    10639  }
    10640  }
    10641  }
    10642 
    10643  if(pBestRequestBlock != VMA_NULL)
    10644  {
    10645  if(mapped)
    10646  {
    10647  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
    10648  if(res != VK_SUCCESS)
    10649  {
    10650  return res;
    10651  }
    10652  }
    10653 
    10654  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
    10655  currentFrameIndex,
    10656  m_FrameInUseCount,
    10657  &bestRequest))
    10658  {
    10659  // We no longer have an empty Allocation.
    10660  if(pBestRequestBlock->m_pMetadata->IsEmpty())
    10661  {
    10662  m_HasEmptyBlock = false;
    10663  }
    10664  // Allocate from this pBlock.
    10665  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
    10666  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, isUpperAddress, *pAllocation);
    10667  (*pAllocation)->InitBlockAllocation(
    10668  hCurrentPool,
    10669  pBestRequestBlock,
    10670  bestRequest.offset,
    10671  alignment,
    10672  size,
    10673  suballocType,
    10674  mapped,
    10675  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    10676  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
    10677  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
    10678  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
    10679  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    10680  {
    10681  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    10682  }
    10683  if(IsCorruptionDetectionEnabled())
    10684  {
    10685  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
    10686  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
    10687  }
    10688  return VK_SUCCESS;
    10689  }
    10690  // else: Some allocations must have been touched while we are here. Next try.
    10691  }
    10692  else
    10693  {
    10694  // Could not find place in any of the blocks - break outer loop.
    10695  break;
    10696  }
    10697  }
    10698  /* Maximum number of tries exceeded - a very unlike event when many other
    10699  threads are simultaneously touching allocations making it impossible to make
    10700  lost at the same time as we try to allocate. */
    10701  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
    10702  {
    10703  return VK_ERROR_TOO_MANY_OBJECTS;
    10704  }
    10705  }
    10706 
    10707  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    10708 }
    10709 
    10710 void VmaBlockVector::Free(
    10711  VmaAllocation hAllocation)
    10712 {
    10713  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
    10714 
    10715  // Scope for lock.
    10716  {
    10717  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10718 
    10719  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    10720 
    10721  if(IsCorruptionDetectionEnabled())
    10722  {
    10723  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
    10724  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
    10725  }
    10726 
    10727  if(hAllocation->IsPersistentMap())
    10728  {
    10729  pBlock->Unmap(m_hAllocator, 1);
    10730  }
    10731 
    10732  pBlock->m_pMetadata->Free(hAllocation);
    10733  VMA_HEAVY_ASSERT(pBlock->Validate());
    10734 
    10735  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", memTypeIndex);
    10736 
    10737  // pBlock became empty after this deallocation.
    10738  if(pBlock->m_pMetadata->IsEmpty())
    10739  {
    10740  // Already has empty Allocation. We don't want to have two, so delete this one.
    10741  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
    10742  {
    10743  pBlockToDelete = pBlock;
    10744  Remove(pBlock);
    10745  }
    10746  // We now have first empty block.
    10747  else
    10748  {
    10749  m_HasEmptyBlock = true;
    10750  }
    10751  }
    10752  // pBlock didn't become empty, but we have another empty block - find and free that one.
    10753  // (This is optional, heuristics.)
    10754  else if(m_HasEmptyBlock)
    10755  {
    10756  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
    10757  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
    10758  {
    10759  pBlockToDelete = pLastBlock;
    10760  m_Blocks.pop_back();
    10761  m_HasEmptyBlock = false;
    10762  }
    10763  }
    10764 
    10765  IncrementallySortBlocks();
    10766  }
    10767 
    10768  // Destruction of a free Allocation. Deferred until this point, outside of mutex
    10769  // lock, for performance reason.
    10770  if(pBlockToDelete != VMA_NULL)
    10771  {
    10772  VMA_DEBUG_LOG(" Deleted empty allocation");
    10773  pBlockToDelete->Destroy(m_hAllocator);
    10774  vma_delete(m_hAllocator, pBlockToDelete);
    10775  }
    10776 }
    10777 
    10778 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
    10779 {
    10780  VkDeviceSize result = 0;
    10781  for(size_t i = m_Blocks.size(); i--; )
    10782  {
    10783  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
    10784  if(result >= m_PreferredBlockSize)
    10785  {
    10786  break;
    10787  }
    10788  }
    10789  return result;
    10790 }
    10791 
    10792 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
    10793 {
    10794  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    10795  {
    10796  if(m_Blocks[blockIndex] == pBlock)
    10797  {
    10798  VmaVectorRemove(m_Blocks, blockIndex);
    10799  return;
    10800  }
    10801  }
    10802  VMA_ASSERT(0);
    10803 }
    10804 
    10805 void VmaBlockVector::IncrementallySortBlocks()
    10806 {
    10807  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    10808  {
    10809  // Bubble sort only until first swap.
    10810  for(size_t i = 1; i < m_Blocks.size(); ++i)
    10811  {
    10812  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
    10813  {
    10814  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
    10815  return;
    10816  }
    10817  }
    10818  }
    10819 }
    10820 
// Tries to carve an allocation of `size` bytes out of one existing block.
// Returns VK_SUCCESS and fills *pAllocation, or VK_ERROR_OUT_OF_DEVICE_MEMORY
// when the block has no suitable free region. This path never makes other
// allocations lost. Callers hold m_Mutex (see Allocate -> AllocatePage).
VkResult VmaBlockVector::AllocateFromBlock(
    VmaDeviceMemoryBlock* pBlock,
    VmaPool hCurrentPool,
    uint32_t currentFrameIndex,
    VkDeviceSize size,
    VkDeviceSize alignment,
    VmaAllocationCreateFlags allocFlags,
    void* pUserData,
    VmaSuballocationType suballocType,
    uint32_t strategy,
    VmaAllocation* pAllocation)
{
    // Callers must strip CAN_MAKE_OTHER_LOST before reaching this function.
    VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
    const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;

    // Ask the block's metadata for a region satisfying size/alignment/strategy.
    VmaAllocationRequest currRequest = {};
    if(pBlock->m_pMetadata->CreateAllocationRequest(
        currentFrameIndex,
        m_FrameInUseCount,
        m_BufferImageGranularity,
        size,
        alignment,
        isUpperAddress,
        suballocType,
        false, // canMakeOtherLost
        strategy,
        &currRequest))
    {
        // Allocate from pCurrBlock.
        VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);

        // Reference-count a persistent mapping if the allocation requests one.
        if(mapped)
        {
            VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
            if(res != VK_SUCCESS)
            {
                return res;
            }
        }

        // We no longer have an empty Allocation.
        if(pBlock->m_pMetadata->IsEmpty())
        {
            m_HasEmptyBlock = false;
        }

        // Create the allocation object and commit the region in the metadata.
        *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
        pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, isUpperAddress, *pAllocation);
        (*pAllocation)->InitBlockAllocation(
            hCurrentPool,
            pBlock,
            currRequest.offset,
            alignment,
            size,
            suballocType,
            mapped,
            (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
        VMA_HEAVY_ASSERT(pBlock->Validate());
        (*pAllocation)->SetUserData(m_hAllocator, pUserData);
        // Debug feature: fill fresh memory with a recognizable pattern.
        if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
        {
            m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
        }
        // Debug feature: surround the region with magic values for corruption checks.
        if(IsCorruptionDetectionEnabled())
        {
            VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
            VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
        }
        return VK_SUCCESS;
    }
    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}
    10895 
    10896 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
    10897 {
    10898  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    10899  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
    10900  allocInfo.allocationSize = blockSize;
    10901  VkDeviceMemory mem = VK_NULL_HANDLE;
    10902  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
    10903  if(res < 0)
    10904  {
    10905  return res;
    10906  }
    10907 
    10908  // New VkDeviceMemory successfully created.
    10909 
    10910  // Create new Allocation for it.
    10911  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
    10912  pBlock->Init(
    10913  m_hAllocator,
    10914  m_MemoryTypeIndex,
    10915  mem,
    10916  allocInfo.allocationSize,
    10917  m_NextBlockId++,
    10918  m_Algorithm);
    10919 
    10920  m_Blocks.push_back(pBlock);
    10921  if(pNewBlockIndex != VMA_NULL)
    10922  {
    10923  *pNewBlockIndex = m_Blocks.size() - 1;
    10924  }
    10925 
    10926  return VK_SUCCESS;
    10927 }
    10928 
    10929 #if VMA_STATS_STRING_ENABLED
    10930 
// Writes a JSON description of this block vector into `json`.
// Custom pools dump their creation parameters (memory type, block size and
// count limits, frameInUseCount, algorithm); the default per-type vectors
// only dump PreferredBlockSize. Then every block's detailed metadata map is
// emitted, keyed by the block's numeric id. Holds m_Mutex for a consistent
// snapshot.
void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
{
    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    json.BeginObject();

    if(m_IsCustomPool)
    {
        json.WriteString("MemoryTypeIndex");
        json.WriteNumber(m_MemoryTypeIndex);

        json.WriteString("BlockSize");
        json.WriteNumber(m_PreferredBlockSize);

        // Min/Max are emitted only when they constrain anything.
        json.WriteString("BlockCount");
        json.BeginObject(true);
        if(m_MinBlockCount > 0)
        {
            json.WriteString("Min");
            json.WriteNumber((uint64_t)m_MinBlockCount);
        }
        if(m_MaxBlockCount < SIZE_MAX)
        {
            json.WriteString("Max");
            json.WriteNumber((uint64_t)m_MaxBlockCount);
        }
        json.WriteString("Cur");
        json.WriteNumber((uint64_t)m_Blocks.size());
        json.EndObject();

        if(m_FrameInUseCount > 0)
        {
            json.WriteString("FrameInUseCount");
            json.WriteNumber(m_FrameInUseCount);
        }

        if(m_Algorithm != 0)
        {
            json.WriteString("Algorithm");
            json.WriteString(VmaAlgorithmToStr(m_Algorithm));
        }
    }
    else
    {
        json.WriteString("PreferredBlockSize");
        json.WriteNumber(m_PreferredBlockSize);
    }

    json.WriteString("Blocks");
    json.BeginObject();
    for(size_t i = 0; i < m_Blocks.size(); ++i)
    {
        // Key each block's map by its unique id.
        json.BeginString();
        json.ContinueString(m_Blocks[i]->GetId());
        json.EndString();

        m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
    }
    json.EndObject();

    json.EndObject();
}
    10993 
    10994 #endif // #if VMA_STATS_STRING_ENABLED
    10995 
    10996 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
    10997  VmaAllocator hAllocator,
    10998  uint32_t currentFrameIndex)
    10999 {
    11000  if(m_pDefragmentator == VMA_NULL)
    11001  {
    11002  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
    11003  hAllocator,
    11004  this,
    11005  currentFrameIndex);
    11006  }
    11007 
    11008  return m_pDefragmentator;
    11009 }
    11010 
// Runs defragmentation using the previously created defragmentator.
// maxBytesToMove / maxAllocationsToMove are in-out budgets: reduced by what
// was actually moved. Optional *pDefragmentationStats accumulates totals.
// Afterwards, empty blocks beyond m_MinBlockCount are destroyed and
// m_HasEmptyBlock is recomputed. No-op (VK_SUCCESS) if EnsureDefragmentator
// was never called.
VkResult VmaBlockVector::Defragment(
    VmaDefragmentationStats* pDefragmentationStats,
    VkDeviceSize& maxBytesToMove,
    uint32_t& maxAllocationsToMove)
{
    if(m_pDefragmentator == VMA_NULL)
    {
        return VK_SUCCESS;
    }

    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    // Defragment.
    VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);

    // Accumulate statistics.
    if(pDefragmentationStats != VMA_NULL)
    {
        const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
        const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
        pDefragmentationStats->bytesMoved += bytesMoved;
        pDefragmentationStats->allocationsMoved += allocationsMoved;
        VMA_ASSERT(bytesMoved <= maxBytesToMove);
        VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
        // Shrink the caller's remaining budgets by what this vector consumed.
        maxBytesToMove -= bytesMoved;
        maxAllocationsToMove -= allocationsMoved;
    }

    // Free empty blocks (iterating backward so VmaVectorRemove is safe).
    m_HasEmptyBlock = false;
    for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    {
        VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
        if(pBlock->m_pMetadata->IsEmpty())
        {
            if(m_Blocks.size() > m_MinBlockCount)
            {
                if(pDefragmentationStats != VMA_NULL)
                {
                    ++pDefragmentationStats->deviceMemoryBlocksFreed;
                    pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
                }

                VmaVectorRemove(m_Blocks, blockIndex);
                pBlock->Destroy(m_hAllocator);
                vma_delete(m_hAllocator, pBlock);
            }
            else
            {
                // Must keep this empty block to honor m_MinBlockCount.
                m_HasEmptyBlock = true;
            }
        }
    }

    return result;
}
    11067 
    11068 void VmaBlockVector::DestroyDefragmentator()
    11069 {
    11070  if(m_pDefragmentator != VMA_NULL)
    11071  {
    11072  vma_delete(m_hAllocator, m_pDefragmentator);
    11073  m_pDefragmentator = VMA_NULL;
    11074  }
    11075 }
    11076 
    11077 void VmaBlockVector::MakePoolAllocationsLost(
    11078  uint32_t currentFrameIndex,
    11079  size_t* pLostAllocationCount)
    11080 {
    11081  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    11082  size_t lostAllocationCount = 0;
    11083  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    11084  {
    11085  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    11086  VMA_ASSERT(pBlock);
    11087  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
    11088  }
    11089  if(pLostAllocationCount != VMA_NULL)
    11090  {
    11091  *pLostAllocationCount = lostAllocationCount;
    11092  }
    11093 }
    11094 
    11095 VkResult VmaBlockVector::CheckCorruption()
    11096 {
    11097  if(!IsCorruptionDetectionEnabled())
    11098  {
    11099  return VK_ERROR_FEATURE_NOT_PRESENT;
    11100  }
    11101 
    11102  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    11103  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    11104  {
    11105  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    11106  VMA_ASSERT(pBlock);
    11107  VkResult res = pBlock->CheckCorruption(m_hAllocator);
    11108  if(res != VK_SUCCESS)
    11109  {
    11110  return res;
    11111  }
    11112  }
    11113  return VK_SUCCESS;
    11114 }
    11115 
    11116 void VmaBlockVector::AddStats(VmaStats* pStats)
    11117 {
    11118  const uint32_t memTypeIndex = m_MemoryTypeIndex;
    11119  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
    11120 
    11121  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    11122 
    11123  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    11124  {
    11125  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    11126  VMA_ASSERT(pBlock);
    11127  VMA_HEAVY_ASSERT(pBlock->Validate());
    11128  VmaStatInfo allocationStatInfo;
    11129  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
    11130  VmaAddStatInfo(pStats->total, allocationStatInfo);
    11131  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    11132  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    11133  }
    11134 }
    11135 
    11137 // VmaDefragmentator members definition
    11138 
// Constructs a defragmentator bound to a single block vector.
// The vectors use the allocator's custom allocation callbacks.
// Defragmentation is only supported for the default allocation algorithm,
// hence the assert on GetAlgorithm() == 0.
VmaDefragmentator::VmaDefragmentator(
    VmaAllocator hAllocator,
    VmaBlockVector* pBlockVector,
    uint32_t currentFrameIndex) :
    m_hAllocator(hAllocator),
    m_pBlockVector(pBlockVector),
    m_CurrentFrameIndex(currentFrameIndex),
    m_BytesMoved(0),
    m_AllocationsMoved(0),
    m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
    m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
{
    VMA_ASSERT(pBlockVector->GetAlgorithm() == 0);
}
    11153 
    11154 VmaDefragmentator::~VmaDefragmentator()
    11155 {
    11156  for(size_t i = m_Blocks.size(); i--; )
    11157  {
    11158  vma_delete(m_hAllocator, m_Blocks[i]);
    11159  }
    11160 }
    11161 
    11162 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
    11163 {
    11164  AllocationInfo allocInfo;
    11165  allocInfo.m_hAllocation = hAlloc;
    11166  allocInfo.m_pChanged = pChanged;
    11167  m_Allocations.push_back(allocInfo);
    11168 }
    11169 
// Returns a CPU pointer to the block's memory in *ppMappedData.
// Reuses a mapping when one exists; otherwise maps the block and remembers
// the pointer in m_pMappedDataForDefragmentation so Unmap() knows this
// mapping was created here (and must be undone).
VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
{
    // It has already been mapped for defragmentation.
    if(m_pMappedDataForDefragmentation)
    {
        *ppMappedData = m_pMappedDataForDefragmentation;
        return VK_SUCCESS;
    }

    // It is originally mapped.
    if(m_pBlock->GetMappedData())
    {
        *ppMappedData = m_pBlock->GetMappedData();
        return VK_SUCCESS;
    }

    // Map on first usage.
    VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
    *ppMappedData = m_pMappedDataForDefragmentation;
    return res;
}
    11191 
    11192 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
    11193 {
    11194  if(m_pMappedDataForDefragmentation != VMA_NULL)
    11195  {
    11196  m_pBlock->Unmap(hAllocator, 1);
    11197  }
    11198 }
    11199 
// Performs one greedy defragmentation pass.
// Walks candidate allocations from the most "source" block (end of m_Blocks)
// toward the most "destination" block (front), trying to relocate each one to
// an earlier block or an earlier offset within its own block.
// Returns VK_SUCCESS when all candidates have been processed, or
// VK_INCOMPLETE as soon as moving the next allocation would exceed
// maxBytesToMove or maxAllocationsToMove.
VkResult VmaDefragmentator::DefragmentRound(
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    if(m_Blocks.empty())
    {
        return VK_SUCCESS;
    }

    // Iteration state: current block and current allocation inside it.
    // SIZE_MAX means "not yet positioned" - the loop below then snaps
    // srcAllocIndex to the last allocation of the block (indices only
    // ever decrease, so SIZE_MAX safely compares >= any size()).
    size_t srcBlockIndex = m_Blocks.size() - 1;
    size_t srcAllocIndex = SIZE_MAX;
    for(;;)
    {
        // 1. Find next allocation to move.
        // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
        // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
        while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
        {
            if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
            {
                // Finished: no more allocations to process.
                if(srcBlockIndex == 0)
                {
                    return VK_SUCCESS;
                }
                else
                {
                    --srcBlockIndex;
                    srcAllocIndex = SIZE_MAX;
                }
            }
            else
            {
                srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
            }
        }

        BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
        AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];

        // Cache the source allocation's geometry before any mutation.
        const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
        const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
        const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
        const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();

        // 2. Try to find new place for this allocation in preceding or current block.
        for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
        {
            BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
            VmaAllocationRequest dstAllocRequest;
            // NOTE(review): upstream VMA of this era passes an allocation
            // strategy argument between canMakeOtherLost and the request
            // pointer; this listing appears to have dropped that line -
            // confirm against the original file before relying on this text.
            if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
                m_CurrentFrameIndex,
                m_pBlockVector->GetFrameInUseCount(),
                m_pBlockVector->GetBufferImageGranularity(),
                size,
                alignment,
                false, // upperAddress
                suballocType,
                false, // canMakeOtherLost
                &dstAllocRequest) &&
            MoveMakesSense(
                dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
            {
                VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);

                // Reached limit on number of allocations or bytes to move.
                if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
                    (m_BytesMoved + size > maxBytesToMove))
                {
                    return VK_INCOMPLETE;
                }

                // Both blocks must be CPU-visible for the memcpy below.
                void* pDstMappedData = VMA_NULL;
                VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
                if(res != VK_SUCCESS)
                {
                    return res;
                }

                void* pSrcMappedData = VMA_NULL;
                res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
                if(res != VK_SUCCESS)
                {
                    return res;
                }

                // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
                memcpy(
                    reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
                    reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
                    static_cast<size_t>(size));

                // Re-stamp the corruption-detection magic around the new location.
                if(VMA_DEBUG_MARGIN > 0)
                {
                    VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset - VMA_DEBUG_MARGIN);
                    VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset + size);
                }

                // Commit: register at destination, release at source, then
                // repoint the allocation handle at its new block/offset.
                pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
                    dstAllocRequest,
                    suballocType,
                    size,
                    false, // upperAddress
                    allocInfo.m_hAllocation);
                pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);

                allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);

                // Report the move to the caller if it asked for notification.
                if(allocInfo.m_pChanged != VMA_NULL)
                {
                    *allocInfo.m_pChanged = VK_TRUE;
                }

                ++m_AllocationsMoved;
                m_BytesMoved += size;

                VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);

                break;
            }
        }

        // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.

        // Advance to the previous allocation / previous block.
        if(srcAllocIndex > 0)
        {
            --srcAllocIndex;
        }
        else
        {
            if(srcBlockIndex > 0)
            {
                --srcBlockIndex;
                srcAllocIndex = SIZE_MAX;
            }
            else
            {
                return VK_SUCCESS;
            }
        }
    }
}
    11343 
// Entry point of the defragmentation algorithm for one block vector.
// Buckets the registered allocations into per-block lists, sorts blocks and
// allocations, runs up to two DefragmentRound() passes within the given
// byte/count budget, then unmaps any blocks mapped for the copies.
// Returns the result of the last round (VK_SUCCESS or VK_INCOMPLETE, or an
// error from mapping).
VkResult VmaDefragmentator::Defragment(
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    if(m_Allocations.empty())
    {
        return VK_SUCCESS;
    }

    // Create block info for each block.
    const size_t blockCount = m_pBlockVector->m_Blocks.size();
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
        pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
        m_Blocks.push_back(pBlockInfo);
    }

    // Sort them by m_pBlock pointer value.
    // (Temporary order - enables the binary search below.)
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());

    // Move allocation infos from m_Allocations to appropriate m_Blocks[memTypeIndex].m_Allocations.
    for(size_t blockIndex = 0, allocCount = m_Allocations.size(); blockIndex < allocCount; ++blockIndex)
    {
        AllocationInfo& allocInfo = m_Allocations[blockIndex];
        // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
        if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
        {
            VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
            BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
            if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
            {
                (*it)->m_Allocations.push_back(allocInfo);
            }
            else
            {
                // Every registered allocation must live in one of the
                // vector's blocks; anything else is an internal error.
                VMA_ASSERT(0);
            }
        }
    }
    m_Allocations.clear();

    // Precompute per-block data used by the sort and by the rounds.
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = m_Blocks[blockIndex];
        pBlockInfo->CalcHasNonMovableAllocations();
        pBlockInfo->SortAllocationsBySizeDescecnding();
    }

    // Sort m_Blocks this time by the main criterium, from most "destination" to most "source" blocks.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());

    // Execute defragmentation rounds (the main part).
    VkResult result = VK_SUCCESS;
    for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
    {
        result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
    }

    // Unmap blocks that were mapped for defragmentation.
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        m_Blocks[blockIndex]->Unmap(m_hAllocator);
    }

    return result;
}
    11411 
    11412 bool VmaDefragmentator::MoveMakesSense(
    11413  size_t dstBlockIndex, VkDeviceSize dstOffset,
    11414  size_t srcBlockIndex, VkDeviceSize srcOffset)
    11415 {
    11416  if(dstBlockIndex < srcBlockIndex)
    11417  {
    11418  return true;
    11419  }
    11420  if(dstBlockIndex > srcBlockIndex)
    11421  {
    11422  return false;
    11423  }
    11424  if(dstOffset < srcOffset)
    11425  {
    11426  return true;
    11427  }
    11428  return false;
    11429 }
    11430 
    11432 // VmaRecorder
    11433 
    11434 #if VMA_RECORDING_ENABLED
    11435 
// Constructs an inactive recorder; Init() opens the file and starts the
// QueryPerformanceCounter time base. INT64_MAX are sentinel values meaning
// "not initialized yet".
VmaRecorder::VmaRecorder() :
    m_UseMutex(true),
    m_Flags(0),
    m_File(VMA_NULL),
    m_Freq(INT64_MAX),
    m_StartCounter(INT64_MAX)
{
}
    11444 
// Opens the recording file and writes the format header.
// settings.pFilePath - path of the CSV file to create (truncated if present).
// useMutex - whether Record* calls must be serialized with m_FileMutex.
// Returns VK_ERROR_INITIALIZATION_FAILED if the file cannot be opened.
VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
{
    m_UseMutex = useMutex;
    m_Flags = settings.flags;

    // Establish the time base: all recorded timestamps are seconds since
    // this moment, measured with QueryPerformanceCounter.
    QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
    QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);

    // Open file for writing.
    errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
    if(err != 0)
    {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Write header: file magic line followed by format version "1,3".
    fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
    fprintf(m_File, "%s\n", "1,3");

    return VK_SUCCESS;
}
    11466 
    11467 VmaRecorder::~VmaRecorder()
    11468 {
    11469  if(m_File != VMA_NULL)
    11470  {
    11471  fclose(m_File);
    11472  }
    11473 }
    11474 
    11475 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
    11476 {
    11477  CallParams callParams;
    11478  GetBasicParams(callParams);
    11479 
    11480  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11481  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
    11482  Flush();
    11483 }
    11484 
    11485 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
    11486 {
    11487  CallParams callParams;
    11488  GetBasicParams(callParams);
    11489 
    11490  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11491  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
    11492  Flush();
    11493 }
    11494 
    11495 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
    11496 {
    11497  CallParams callParams;
    11498  GetBasicParams(callParams);
    11499 
    11500  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11501  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
    11502  createInfo.memoryTypeIndex,
    11503  createInfo.flags,
    11504  createInfo.blockSize,
    11505  (uint64_t)createInfo.minBlockCount,
    11506  (uint64_t)createInfo.maxBlockCount,
    11507  createInfo.frameInUseCount,
    11508  pool);
    11509  Flush();
    11510 }
    11511 
    11512 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
    11513 {
    11514  CallParams callParams;
    11515  GetBasicParams(callParams);
    11516 
    11517  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11518  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
    11519  pool);
    11520  Flush();
    11521 }
    11522 
    11523 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
    11524  const VkMemoryRequirements& vkMemReq,
    11525  const VmaAllocationCreateInfo& createInfo,
    11526  VmaAllocation allocation)
    11527 {
    11528  CallParams callParams;
    11529  GetBasicParams(callParams);
    11530 
    11531  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11532  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    11533  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    11534  vkMemReq.size,
    11535  vkMemReq.alignment,
    11536  vkMemReq.memoryTypeBits,
    11537  createInfo.flags,
    11538  createInfo.usage,
    11539  createInfo.requiredFlags,
    11540  createInfo.preferredFlags,
    11541  createInfo.memoryTypeBits,
    11542  createInfo.pool,
    11543  allocation,
    11544  userDataStr.GetString());
    11545  Flush();
    11546 }
    11547 
    11548 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
    11549  const VkMemoryRequirements& vkMemReq,
    11550  bool requiresDedicatedAllocation,
    11551  bool prefersDedicatedAllocation,
    11552  const VmaAllocationCreateInfo& createInfo,
    11553  VmaAllocation allocation)
    11554 {
    11555  CallParams callParams;
    11556  GetBasicParams(callParams);
    11557 
    11558  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11559  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    11560  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    11561  vkMemReq.size,
    11562  vkMemReq.alignment,
    11563  vkMemReq.memoryTypeBits,
    11564  requiresDedicatedAllocation ? 1 : 0,
    11565  prefersDedicatedAllocation ? 1 : 0,
    11566  createInfo.flags,
    11567  createInfo.usage,
    11568  createInfo.requiredFlags,
    11569  createInfo.preferredFlags,
    11570  createInfo.memoryTypeBits,
    11571  createInfo.pool,
    11572  allocation,
    11573  userDataStr.GetString());
    11574  Flush();
    11575 }
    11576 
    11577 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
    11578  const VkMemoryRequirements& vkMemReq,
    11579  bool requiresDedicatedAllocation,
    11580  bool prefersDedicatedAllocation,
    11581  const VmaAllocationCreateInfo& createInfo,
    11582  VmaAllocation allocation)
    11583 {
    11584  CallParams callParams;
    11585  GetBasicParams(callParams);
    11586 
    11587  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11588  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    11589  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    11590  vkMemReq.size,
    11591  vkMemReq.alignment,
    11592  vkMemReq.memoryTypeBits,
    11593  requiresDedicatedAllocation ? 1 : 0,
    11594  prefersDedicatedAllocation ? 1 : 0,
    11595  createInfo.flags,
    11596  createInfo.usage,
    11597  createInfo.requiredFlags,
    11598  createInfo.preferredFlags,
    11599  createInfo.memoryTypeBits,
    11600  createInfo.pool,
    11601  allocation,
    11602  userDataStr.GetString());
    11603  Flush();
    11604 }
    11605 
    11606 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
    11607  VmaAllocation allocation)
    11608 {
    11609  CallParams callParams;
    11610  GetBasicParams(callParams);
    11611 
    11612  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11613  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    11614  allocation);
    11615  Flush();
    11616 }
    11617 
    11618 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
    11619  VmaAllocation allocation,
    11620  const void* pUserData)
    11621 {
    11622  CallParams callParams;
    11623  GetBasicParams(callParams);
    11624 
    11625  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11626  UserDataString userDataStr(
    11627  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
    11628  pUserData);
    11629  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    11630  allocation,
    11631  userDataStr.GetString());
    11632  Flush();
    11633 }
    11634 
    11635 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
    11636  VmaAllocation allocation)
    11637 {
    11638  CallParams callParams;
    11639  GetBasicParams(callParams);
    11640 
    11641  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11642  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
    11643  allocation);
    11644  Flush();
    11645 }
    11646 
    11647 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
    11648  VmaAllocation allocation)
    11649 {
    11650  CallParams callParams;
    11651  GetBasicParams(callParams);
    11652 
    11653  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11654  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    11655  allocation);
    11656  Flush();
    11657 }
    11658 
    11659 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
    11660  VmaAllocation allocation)
    11661 {
    11662  CallParams callParams;
    11663  GetBasicParams(callParams);
    11664 
    11665  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11666  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    11667  allocation);
    11668  Flush();
    11669 }
    11670 
    11671 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
    11672  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    11673 {
    11674  CallParams callParams;
    11675  GetBasicParams(callParams);
    11676 
    11677  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11678  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
    11679  allocation,
    11680  offset,
    11681  size);
    11682  Flush();
    11683 }
    11684 
    11685 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
    11686  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    11687 {
    11688  CallParams callParams;
    11689  GetBasicParams(callParams);
    11690 
    11691  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11692  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
    11693  allocation,
    11694  offset,
    11695  size);
    11696  Flush();
    11697 }
    11698 
    11699 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
    11700  const VkBufferCreateInfo& bufCreateInfo,
    11701  const VmaAllocationCreateInfo& allocCreateInfo,
    11702  VmaAllocation allocation)
    11703 {
    11704  CallParams callParams;
    11705  GetBasicParams(callParams);
    11706 
    11707  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11708  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    11709  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    11710  bufCreateInfo.flags,
    11711  bufCreateInfo.size,
    11712  bufCreateInfo.usage,
    11713  bufCreateInfo.sharingMode,
    11714  allocCreateInfo.flags,
    11715  allocCreateInfo.usage,
    11716  allocCreateInfo.requiredFlags,
    11717  allocCreateInfo.preferredFlags,
    11718  allocCreateInfo.memoryTypeBits,
    11719  allocCreateInfo.pool,
    11720  allocation,
    11721  userDataStr.GetString());
    11722  Flush();
    11723 }
    11724 
    11725 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
    11726  const VkImageCreateInfo& imageCreateInfo,
    11727  const VmaAllocationCreateInfo& allocCreateInfo,
    11728  VmaAllocation allocation)
    11729 {
    11730  CallParams callParams;
    11731  GetBasicParams(callParams);
    11732 
    11733  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11734  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    11735  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    11736  imageCreateInfo.flags,
    11737  imageCreateInfo.imageType,
    11738  imageCreateInfo.format,
    11739  imageCreateInfo.extent.width,
    11740  imageCreateInfo.extent.height,
    11741  imageCreateInfo.extent.depth,
    11742  imageCreateInfo.mipLevels,
    11743  imageCreateInfo.arrayLayers,
    11744  imageCreateInfo.samples,
    11745  imageCreateInfo.tiling,
    11746  imageCreateInfo.usage,
    11747  imageCreateInfo.sharingMode,
    11748  imageCreateInfo.initialLayout,
    11749  allocCreateInfo.flags,
    11750  allocCreateInfo.usage,
    11751  allocCreateInfo.requiredFlags,
    11752  allocCreateInfo.preferredFlags,
    11753  allocCreateInfo.memoryTypeBits,
    11754  allocCreateInfo.pool,
    11755  allocation,
    11756  userDataStr.GetString());
    11757  Flush();
    11758 }
    11759 
    11760 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
    11761  VmaAllocation allocation)
    11762 {
    11763  CallParams callParams;
    11764  GetBasicParams(callParams);
    11765 
    11766  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11767  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
    11768  allocation);
    11769  Flush();
    11770 }
    11771 
    11772 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
    11773  VmaAllocation allocation)
    11774 {
    11775  CallParams callParams;
    11776  GetBasicParams(callParams);
    11777 
    11778  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11779  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
    11780  allocation);
    11781  Flush();
    11782 }
    11783 
    11784 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
    11785  VmaAllocation allocation)
    11786 {
    11787  CallParams callParams;
    11788  GetBasicParams(callParams);
    11789 
    11790  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11791  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
    11792  allocation);
    11793  Flush();
    11794 }
    11795 
    11796 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
    11797  VmaAllocation allocation)
    11798 {
    11799  CallParams callParams;
    11800  GetBasicParams(callParams);
    11801 
    11802  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11803  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
    11804  allocation);
    11805  Flush();
    11806 }
    11807 
    11808 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
    11809  VmaPool pool)
    11810 {
    11811  CallParams callParams;
    11812  GetBasicParams(callParams);
    11813 
    11814  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11815  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
    11816  pool);
    11817  Flush();
    11818 }
    11819 
    11820 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
    11821 {
    11822  if(pUserData != VMA_NULL)
    11823  {
    11824  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
    11825  {
    11826  m_Str = (const char*)pUserData;
    11827  }
    11828  else
    11829  {
    11830  sprintf_s(m_PtrStr, "%p", pUserData);
    11831  m_Str = m_PtrStr;
    11832  }
    11833  }
    11834  else
    11835  {
    11836  m_Str = "";
    11837  }
    11838 }
    11839 
// Writes the "Config" section at the top of the recording: physical device
// identity and limits, the memory heap/type layout, enabled extensions, and
// the library's compile-time macro settings, so a replay tool can check that
// the playback environment matches the one recorded.
void VmaRecorder::WriteConfiguration(
    const VkPhysicalDeviceProperties& devProps,
    const VkPhysicalDeviceMemoryProperties& memProps,
    bool dedicatedAllocationExtensionEnabled)
{
    fprintf(m_File, "Config,Begin\n");

    // Device identity.
    fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
    fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
    fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
    fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
    fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
    fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);

    // Limits that influence allocation behavior.
    fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
    fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
    fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);

    // Memory heap and type layout.
    fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
    for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
    }
    fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
    for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
    }

    fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);

    // Compile-time configuration of this library build.
    fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
    fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
    fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
    fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
    fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);

    fprintf(m_File, "Config,End\n");
}
    11885 
    11886 void VmaRecorder::GetBasicParams(CallParams& outParams)
    11887 {
    11888  outParams.threadId = GetCurrentThreadId();
    11889 
    11890  LARGE_INTEGER counter;
    11891  QueryPerformanceCounter(&counter);
    11892  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
    11893 }
    11894 
    11895 void VmaRecorder::Flush()
    11896 {
    11897  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
    11898  {
    11899  fflush(m_File);
    11900  }
    11901 }
    11902 
    11903 #endif // #if VMA_RECORDING_ENABLED
    11904 
    11906 // VmaAllocator_T
    11907 
    11908 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
    11909  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
    11910  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
    11911  m_hDevice(pCreateInfo->device),
    11912  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
    11913  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
    11914  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
    11915  m_PreferredLargeHeapBlockSize(0),
    11916  m_PhysicalDevice(pCreateInfo->physicalDevice),
    11917  m_CurrentFrameIndex(0),
    11918  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
    11919  m_NextPoolId(0)
    11921  ,m_pRecorder(VMA_NULL)
    11922 #endif
    11923 {
    11924  if(VMA_DEBUG_DETECT_CORRUPTION)
    11925  {
    11926  // Needs to be multiply of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
    11927  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
    11928  }
    11929 
    11930  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
    11931 
    11932 #if !(VMA_DEDICATED_ALLOCATION)
    11934  {
    11935  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
    11936  }
    11937 #endif
    11938 
    11939  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
    11940  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
    11941  memset(&m_MemProps, 0, sizeof(m_MemProps));
    11942 
    11943  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
    11944  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
    11945 
    11946  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    11947  {
    11948  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
    11949  }
    11950 
    11951  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
    11952  {
    11953  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
    11954  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
    11955  }
    11956 
    11957  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
    11958 
    11959  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
    11960  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
    11961 
    11962  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
    11963  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
    11964  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
    11965  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
    11966 
    11967  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
    11968  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
    11969 
    11970  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
    11971  {
    11972  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
    11973  {
    11974  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
    11975  if(limit != VK_WHOLE_SIZE)
    11976  {
    11977  m_HeapSizeLimit[heapIndex] = limit;
    11978  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
    11979  {
    11980  m_MemProps.memoryHeaps[heapIndex].size = limit;
    11981  }
    11982  }
    11983  }
    11984  }
    11985 
    11986  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    11987  {
    11988  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
    11989 
    11990  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
    11991  this,
    11992  memTypeIndex,
    11993  preferredBlockSize,
    11994  0,
    11995  SIZE_MAX,
    11996  GetBufferImageGranularity(),
    11997  pCreateInfo->frameInUseCount,
    11998  false, // isCustomPool
    11999  false, // explicitBlockSize
    12000  false); // linearAlgorithm
    12001  // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
    12002  // becase minBlockCount is 0.
    12003  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
    12004 
    12005  }
    12006 }
    12007 
    12008 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
    12009 {
    12010  VkResult res = VK_SUCCESS;
    12011 
    12012  if(pCreateInfo->pRecordSettings != VMA_NULL &&
    12013  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
    12014  {
    12015 #if VMA_RECORDING_ENABLED
    12016  m_pRecorder = vma_new(this, VmaRecorder)();
    12017  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
    12018  if(res != VK_SUCCESS)
    12019  {
    12020  return res;
    12021  }
    12022  m_pRecorder->WriteConfiguration(
    12023  m_PhysicalDeviceProperties,
    12024  m_MemProps,
    12025  m_UseKhrDedicatedAllocation);
    12026  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
    12027 #else
    12028  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
    12029  return VK_ERROR_FEATURE_NOT_PRESENT;
    12030 #endif
    12031  }
    12032 
    12033  return res;
    12034 }
    12035 
    12036 VmaAllocator_T::~VmaAllocator_T()
    12037 {
    12038 #if VMA_RECORDING_ENABLED
    12039  if(m_pRecorder != VMA_NULL)
    12040  {
    12041  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
    12042  vma_delete(this, m_pRecorder);
    12043  }
    12044 #endif
    12045 
    12046  VMA_ASSERT(m_Pools.empty());
    12047 
    12048  for(size_t i = GetMemoryTypeCount(); i--; )
    12049  {
    12050  vma_delete(this, m_pDedicatedAllocations[i]);
    12051  vma_delete(this, m_pBlockVectors[i]);
    12052  }
    12053 }
    12054 
// Fills m_VulkanFunctions with the Vulkan entry points the allocator calls.
// Sources, applied in order (later wins):
// 1. When VMA_STATIC_VULKAN_FUNCTIONS == 1: addresses of the statically
//    linked core functions; the KHR dedicated-allocation entry points have no
//    static symbols, so they are fetched via vkGetDeviceProcAddr.
// 2. Any non-null pointer in the optional pVulkanFunctions struct overrides
//    the corresponding entry.
// Finally asserts that every required pointer ended up non-null.
void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
{
#if VMA_STATIC_VULKAN_FUNCTIONS == 1
    m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
    m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
    m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
    m_VulkanFunctions.vkMapMemory = &vkMapMemory;
    m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
    m_VulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
    m_VulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
    m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
    m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
    m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
    m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
    m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
    m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
    m_VulkanFunctions.vkCreateImage = &vkCreateImage;
    m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        // Extension functions are only queried when the user enabled the
        // VK_KHR_dedicated_allocation code path.
        m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
            (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
        m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
            (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
    }
#endif // #if VMA_DEDICATED_ALLOCATION
#endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1

// Copies a single function pointer from the user-provided struct, keeping the
// statically-resolved one when the user left the field null.
#define VMA_COPY_IF_NOT_NULL(funcName) \
    if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;

    if(pVulkanFunctions != VMA_NULL)
    {
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
        VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
        VMA_COPY_IF_NOT_NULL(vkFreeMemory);
        VMA_COPY_IF_NOT_NULL(vkMapMemory);
        VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
        VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
        VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
        VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
        VMA_COPY_IF_NOT_NULL(vkCreateImage);
        VMA_COPY_IF_NOT_NULL(vkDestroyImage);
#if VMA_DEDICATED_ALLOCATION
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
#endif
    }

#undef VMA_COPY_IF_NOT_NULL

    // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
    // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
        VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
    }
#endif
}
    12140 
    12141 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
    12142 {
    12143  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    12144  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
    12145  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
    12146  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
    12147 }
    12148 
    12149 VkResult VmaAllocator_T::AllocateMemoryOfType(
    12150  VkDeviceSize size,
    12151  VkDeviceSize alignment,
    12152  bool dedicatedAllocation,
    12153  VkBuffer dedicatedBuffer,
    12154  VkImage dedicatedImage,
    12155  const VmaAllocationCreateInfo& createInfo,
    12156  uint32_t memTypeIndex,
    12157  VmaSuballocationType suballocType,
    12158  size_t allocationCount,
    12159  VmaAllocation* pAllocations)
    12160 {
    12161  VMA_ASSERT(pAllocations != VMA_NULL);
    12162  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, vkMemReq.size);
    12163 
    12164  VmaAllocationCreateInfo finalCreateInfo = createInfo;
    12165 
    12166  // If memory type is not HOST_VISIBLE, disable MAPPED.
    12167  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    12168  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    12169  {
    12170  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
    12171  }
    12172 
    12173  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
    12174  VMA_ASSERT(blockVector);
    12175 
    12176  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
    12177  bool preferDedicatedMemory =
    12178  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
    12179  dedicatedAllocation ||
    12180  // Heuristics: Allocate dedicated memory if requested size if greater than half of preferred block size.
    12181  size > preferredBlockSize / 2;
    12182 
    12183  if(preferDedicatedMemory &&
    12184  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
    12185  finalCreateInfo.pool == VK_NULL_HANDLE)
    12186  {
    12188  }
    12189 
    12190  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
    12191  {
    12192  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12193  {
    12194  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12195  }
    12196  else
    12197  {
    12198  return AllocateDedicatedMemory(
    12199  size,
    12200  suballocType,
    12201  memTypeIndex,
    12202  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    12203  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    12204  finalCreateInfo.pUserData,
    12205  dedicatedBuffer,
    12206  dedicatedImage,
    12207  allocationCount,
    12208  pAllocations);
    12209  }
    12210  }
    12211  else
    12212  {
    12213  VkResult res = blockVector->Allocate(
    12214  VK_NULL_HANDLE, // hCurrentPool
    12215  m_CurrentFrameIndex.load(),
    12216  size,
    12217  alignment,
    12218  finalCreateInfo,
    12219  suballocType,
    12220  allocationCount,
    12221  pAllocations);
    12222  if(res == VK_SUCCESS)
    12223  {
    12224  return res;
    12225  }
    12226 
    12227  // 5. Try dedicated memory.
    12228  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12229  {
    12230  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12231  }
    12232  else
    12233  {
    12234  res = AllocateDedicatedMemory(
    12235  size,
    12236  suballocType,
    12237  memTypeIndex,
    12238  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    12239  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    12240  finalCreateInfo.pUserData,
    12241  dedicatedBuffer,
    12242  dedicatedImage,
    12243  allocationCount,
    12244  pAllocations);
    12245  if(res == VK_SUCCESS)
    12246  {
    12247  // Succeeded: AllocateDedicatedMemory function already filld pMemory, nothing more to do here.
    12248  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
    12249  return VK_SUCCESS;
    12250  }
    12251  else
    12252  {
    12253  // Everything failed: Return error code.
    12254  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    12255  return res;
    12256  }
    12257  }
    12258  }
    12259 }
    12260 
    12261 VkResult VmaAllocator_T::AllocateDedicatedMemory(
    12262  VkDeviceSize size,
    12263  VmaSuballocationType suballocType,
    12264  uint32_t memTypeIndex,
    12265  bool map,
    12266  bool isUserDataString,
    12267  void* pUserData,
    12268  VkBuffer dedicatedBuffer,
    12269  VkImage dedicatedImage,
    12270  size_t allocationCount,
    12271  VmaAllocation* pAllocations)
    12272 {
    12273  VMA_ASSERT(allocationCount > 0 && pAllocations);
    12274 
    12275  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    12276  allocInfo.memoryTypeIndex = memTypeIndex;
    12277  allocInfo.allocationSize = size;
    12278 
    12279 #if VMA_DEDICATED_ALLOCATION
    12280  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
    12281  if(m_UseKhrDedicatedAllocation)
    12282  {
    12283  if(dedicatedBuffer != VK_NULL_HANDLE)
    12284  {
    12285  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
    12286  dedicatedAllocInfo.buffer = dedicatedBuffer;
    12287  allocInfo.pNext = &dedicatedAllocInfo;
    12288  }
    12289  else if(dedicatedImage != VK_NULL_HANDLE)
    12290  {
    12291  dedicatedAllocInfo.image = dedicatedImage;
    12292  allocInfo.pNext = &dedicatedAllocInfo;
    12293  }
    12294  }
    12295 #endif // #if VMA_DEDICATED_ALLOCATION
    12296 
    12297  size_t allocIndex;
    12298  VkResult res;
    12299  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    12300  {
    12301  res = AllocateDedicatedMemoryPage(
    12302  size,
    12303  suballocType,
    12304  memTypeIndex,
    12305  allocInfo,
    12306  map,
    12307  isUserDataString,
    12308  pUserData,
    12309  pAllocations + allocIndex);
    12310  if(res != VK_SUCCESS)
    12311  {
    12312  break;
    12313  }
    12314  }
    12315 
    12316  if(res == VK_SUCCESS)
    12317  {
    12318  // Register them in m_pDedicatedAllocations.
    12319  {
    12320  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    12321  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
    12322  VMA_ASSERT(pDedicatedAllocations);
    12323  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    12324  {
    12325  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, pAllocations[allocIndex]);
    12326  }
    12327  }
    12328 
    12329  VMA_DEBUG_LOG(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
    12330  }
    12331  else
    12332  {
    12333  // Free all already created allocations.
    12334  while(allocIndex--)
    12335  {
    12336  VmaAllocation currAlloc = pAllocations[allocIndex];
    12337  VkDeviceMemory hMemory = currAlloc->GetMemory();
    12338 
    12339  /*
    12340  There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
    12341  before vkFreeMemory.
    12342 
    12343  if(currAlloc->GetMappedData() != VMA_NULL)
    12344  {
    12345  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    12346  }
    12347  */
    12348 
    12349  FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);
    12350 
    12351  currAlloc->SetUserData(this, VMA_NULL);
    12352  vma_delete(this, currAlloc);
    12353  }
    12354 
    12355  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
    12356  }
    12357 
    12358  return res;
    12359 }
    12360 
// Allocates one VkDeviceMemory object for a single dedicated allocation,
// optionally maps it persistently, and writes the resulting VmaAllocation
// handle to *pAllocation.
// On vkAllocateMemory failure the error is returned directly; on vkMapMemory
// failure the just-allocated memory is freed before returning, so no resource
// is leaked in either case.
VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
    VkDeviceSize size,
    VmaSuballocationType suballocType,
    uint32_t memTypeIndex,
    const VkMemoryAllocateInfo& allocInfo,
    bool map,
    bool isUserDataString,
    void* pUserData,
    VmaAllocation* pAllocation)
{
    VkDeviceMemory hMemory = VK_NULL_HANDLE;
    VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
    if(res < 0)
    {
        VMA_DEBUG_LOG("    vkAllocateMemory FAILED");
        return res;
    }

    void* pMappedData = VMA_NULL;
    if(map)
    {
        // Persistent whole-range mapping (offset 0, VK_WHOLE_SIZE).
        res = (*m_VulkanFunctions.vkMapMemory)(
            m_hDevice,
            hMemory,
            0,
            VK_WHOLE_SIZE,
            0,
            &pMappedData);
        if(res < 0)
        {
            VMA_DEBUG_LOG("    vkMapMemory FAILED");
            // Roll back the allocation made above.
            FreeVulkanMemory(memTypeIndex, size, hMemory);
            return res;
        }
    }

    *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
    (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
    (*pAllocation)->SetUserData(this, pUserData);
    if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    {
        // Debug feature: fill fresh memory with a recognizable pattern.
        FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    }

    return VK_SUCCESS;
}
    12407 
// Queries memory requirements for hBuffer. When VK_KHR_get_memory_requirements2
// / dedicated allocation support is enabled, uses
// vkGetBufferMemoryRequirements2KHR with VkMemoryDedicatedRequirementsKHR
// chained in, so the driver can report whether a dedicated allocation is
// required or preferred. Otherwise falls back to the core function and reports
// both flags as false.
void VmaAllocator_T::GetBufferMemoryRequirements(
    VkBuffer hBuffer,
    VkMemoryRequirements& memReq,
    bool& requiresDedicatedAllocation,
    bool& prefersDedicatedAllocation) const
{
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
        memReqInfo.buffer = hBuffer;

        VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };

        // Chain the dedicated-requirements query onto the main query.
        VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
        memReq2.pNext = &memDedicatedReq;

        (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);

        memReq = memReq2.memoryRequirements;
        requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
        prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    }
    else
#endif // #if VMA_DEDICATED_ALLOCATION
    {
        (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
        requiresDedicatedAllocation = false;
        prefersDedicatedAllocation = false;
    }
}
    12439 
// Image counterpart of GetBufferMemoryRequirements: queries memory
// requirements for hImage, using vkGetImageMemoryRequirements2KHR with
// VkMemoryDedicatedRequirementsKHR chained when the KHR dedicated-allocation
// path is enabled, otherwise the core function with both dedicated flags
// reported as false.
void VmaAllocator_T::GetImageMemoryRequirements(
    VkImage hImage,
    VkMemoryRequirements& memReq,
    bool& requiresDedicatedAllocation,
    bool& prefersDedicatedAllocation) const
{
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
        memReqInfo.image = hImage;

        VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };

        // Chain the dedicated-requirements query onto the main query.
        VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
        memReq2.pNext = &memDedicatedReq;

        (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);

        memReq = memReq2.memoryRequirements;
        requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
        prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    }
    else
#endif // #if VMA_DEDICATED_ALLOCATION
    {
        (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
        requiresDedicatedAllocation = false;
        prefersDedicatedAllocation = false;
    }
}
    12471 
    12472 VkResult VmaAllocator_T::AllocateMemory(
    12473  const VkMemoryRequirements& vkMemReq,
    12474  bool requiresDedicatedAllocation,
    12475  bool prefersDedicatedAllocation,
    12476  VkBuffer dedicatedBuffer,
    12477  VkImage dedicatedImage,
    12478  const VmaAllocationCreateInfo& createInfo,
    12479  VmaSuballocationType suballocType,
    12480  size_t allocationCount,
    12481  VmaAllocation* pAllocations)
    12482 {
    12483  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
    12484 
    12485  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
    12486 
    12487  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
    12488  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12489  {
    12490  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
    12491  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12492  }
    12493  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    12495  {
    12496  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
    12497  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12498  }
    12499  if(requiresDedicatedAllocation)
    12500  {
    12501  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12502  {
    12503  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
    12504  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12505  }
    12506  if(createInfo.pool != VK_NULL_HANDLE)
    12507  {
    12508  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
    12509  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12510  }
    12511  }
    12512  if((createInfo.pool != VK_NULL_HANDLE) &&
    12513  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
    12514  {
    12515  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
    12516  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12517  }
    12518 
    12519  if(createInfo.pool != VK_NULL_HANDLE)
    12520  {
    12521  const VkDeviceSize alignmentForPool = VMA_MAX(
    12522  vkMemReq.alignment,
    12523  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
    12524  return createInfo.pool->m_BlockVector.Allocate(
    12525  createInfo.pool,
    12526  m_CurrentFrameIndex.load(),
    12527  vkMemReq.size,
    12528  alignmentForPool,
    12529  createInfo,
    12530  suballocType,
    12531  allocationCount,
    12532  pAllocations);
    12533  }
    12534  else
    12535  {
    12536  // Bit mask of memory Vulkan types acceptable for this allocation.
    12537  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
    12538  uint32_t memTypeIndex = UINT32_MAX;
    12539  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    12540  if(res == VK_SUCCESS)
    12541  {
    12542  VkDeviceSize alignmentForMemType = VMA_MAX(
    12543  vkMemReq.alignment,
    12544  GetMemoryTypeMinAlignment(memTypeIndex));
    12545 
    12546  res = AllocateMemoryOfType(
    12547  vkMemReq.size,
    12548  alignmentForMemType,
    12549  requiresDedicatedAllocation || prefersDedicatedAllocation,
    12550  dedicatedBuffer,
    12551  dedicatedImage,
    12552  createInfo,
    12553  memTypeIndex,
    12554  suballocType,
    12555  allocationCount,
    12556  pAllocations);
    12557  // Succeeded on first try.
    12558  if(res == VK_SUCCESS)
    12559  {
    12560  return res;
    12561  }
    12562  // Allocation from this memory type failed. Try other compatible memory types.
    12563  else
    12564  {
    12565  for(;;)
    12566  {
    12567  // Remove old memTypeIndex from list of possibilities.
    12568  memoryTypeBits &= ~(1u << memTypeIndex);
    12569  // Find alternative memTypeIndex.
    12570  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    12571  if(res == VK_SUCCESS)
    12572  {
    12573  alignmentForMemType = VMA_MAX(
    12574  vkMemReq.alignment,
    12575  GetMemoryTypeMinAlignment(memTypeIndex));
    12576 
    12577  res = AllocateMemoryOfType(
    12578  vkMemReq.size,
    12579  alignmentForMemType,
    12580  requiresDedicatedAllocation || prefersDedicatedAllocation,
    12581  dedicatedBuffer,
    12582  dedicatedImage,
    12583  createInfo,
    12584  memTypeIndex,
    12585  suballocType,
    12586  allocationCount,
    12587  pAllocations);
    12588  // Allocation from this alternative memory type succeeded.
    12589  if(res == VK_SUCCESS)
    12590  {
    12591  return res;
    12592  }
    12593  // else: Allocation from this memory type failed. Try next one - next loop iteration.
    12594  }
    12595  // No other matching memory type index could be found.
    12596  else
    12597  {
    12598  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
    12599  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12600  }
    12601  }
    12602  }
    12603  }
    12604  // Can't find any single memory type maching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
    12605  else
    12606  return res;
    12607  }
    12608 }
    12609 
// Frees allocationCount allocations from pAllocations, iterating in reverse
// order. Null handles in the array are skipped.
// When TouchAllocation() returns true the backing memory is released: block
// suballocations go back to their owning block vector (custom pool or the
// default one for the memory type), dedicated allocations are freed via
// FreeDedicatedMemory. When it returns false, no backing memory is released -
// presumably the allocation is already lost; TODO confirm - and only the
// metadata object is destroyed. In both cases the VmaAllocation_T object
// itself is deleted at the end.
void VmaAllocator_T::FreeMemory(
    size_t allocationCount,
    const VmaAllocation* pAllocations)
{
    VMA_ASSERT(pAllocations);

    for(size_t allocIndex = allocationCount; allocIndex--; )
    {
        VmaAllocation allocation = pAllocations[allocIndex];

        if(allocation != VK_NULL_HANDLE)
        {
            if(TouchAllocation(allocation))
            {
                if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
                {
                    // Debug feature: overwrite freed memory with a pattern.
                    FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
                }

                switch(allocation->GetType())
                {
                case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
                    {
                        VmaBlockVector* pBlockVector = VMA_NULL;
                        VmaPool hPool = allocation->GetPool();
                        if(hPool != VK_NULL_HANDLE)
                        {
                            // Allocation came from a custom pool.
                            pBlockVector = &hPool->m_BlockVector;
                        }
                        else
                        {
                            // Allocation came from a default per-type block vector.
                            const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
                            pBlockVector = m_pBlockVectors[memTypeIndex];
                        }
                        pBlockVector->Free(allocation);
                    }
                    break;
                case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
                    FreeDedicatedMemory(allocation);
                    break;
                default:
                    VMA_ASSERT(0);
                }
            }

            // Destroy the metadata object regardless of whether memory was freed.
            allocation->SetUserData(this, VMA_NULL);
            vma_delete(this, allocation);
        }
    }
}
    12660 
    12661 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
    12662 {
    12663  // Initialize.
    12664  InitStatInfo(pStats->total);
    12665  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
    12666  InitStatInfo(pStats->memoryType[i]);
    12667  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    12668  InitStatInfo(pStats->memoryHeap[i]);
    12669 
    12670  // Process default pools.
    12671  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    12672  {
    12673  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    12674  VMA_ASSERT(pBlockVector);
    12675  pBlockVector->AddStats(pStats);
    12676  }
    12677 
    12678  // Process custom pools.
    12679  {
    12680  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    12681  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    12682  {
    12683  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
    12684  }
    12685  }
    12686 
    12687  // Process dedicated allocations.
    12688  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    12689  {
    12690  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    12691  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    12692  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
    12693  VMA_ASSERT(pDedicatedAllocVector);
    12694  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
    12695  {
    12696  VmaStatInfo allocationStatInfo;
    12697  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
    12698  VmaAddStatInfo(pStats->total, allocationStatInfo);
    12699  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    12700  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    12701  }
    12702  }
    12703 
    12704  // Postprocess.
    12705  VmaPostprocessCalcStatInfo(pStats->total);
    12706  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
    12707  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
    12708  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
    12709  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
    12710 }
    12711 
// PCI vendor ID of AMD: 4098 == 0x1002.
static const uint32_t VMA_VENDOR_ID_AMD = 4098;
    12713 
    12714 VkResult VmaAllocator_T::Defragment(
    12715  VmaAllocation* pAllocations,
    12716  size_t allocationCount,
    12717  VkBool32* pAllocationsChanged,
    12718  const VmaDefragmentationInfo* pDefragmentationInfo,
    12719  VmaDefragmentationStats* pDefragmentationStats)
    12720 {
    12721  if(pAllocationsChanged != VMA_NULL)
    12722  {
    12723  memset(pAllocationsChanged, 0, sizeof(*pAllocationsChanged));
    12724  }
    12725  if(pDefragmentationStats != VMA_NULL)
    12726  {
    12727  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
    12728  }
    12729 
    12730  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
    12731 
    12732  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
    12733 
    12734  const size_t poolCount = m_Pools.size();
    12735 
    12736  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
    12737  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    12738  {
    12739  VmaAllocation hAlloc = pAllocations[allocIndex];
    12740  VMA_ASSERT(hAlloc);
    12741  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
    12742  // DedicatedAlloc cannot be defragmented.
    12743  const VkMemoryPropertyFlags requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    12744  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
    12745  // Only HOST_VISIBLE and HOST_COHERENT memory types can be defragmented.
    12746  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags) &&
    12747  // Lost allocation cannot be defragmented.
    12748  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
    12749  {
    12750  VmaBlockVector* pAllocBlockVector = VMA_NULL;
    12751 
    12752  const VmaPool hAllocPool = hAlloc->GetPool();
    12753  // This allocation belongs to custom pool.
    12754  if(hAllocPool != VK_NULL_HANDLE)
    12755  {
    12756  // Pools with linear or buddy algorithm are not defragmented.
    12757  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
    12758  {
    12759  pAllocBlockVector = &hAllocPool->m_BlockVector;
    12760  }
    12761  }
    12762  // This allocation belongs to general pool.
    12763  else
    12764  {
    12765  pAllocBlockVector = m_pBlockVectors[memTypeIndex];
    12766  }
    12767 
    12768  if(pAllocBlockVector != VMA_NULL)
    12769  {
    12770  VmaDefragmentator* const pDefragmentator =
    12771  pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
    12772  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
    12773  &pAllocationsChanged[allocIndex] : VMA_NULL;
    12774  pDefragmentator->AddAllocation(hAlloc, pChanged);
    12775  }
    12776  }
    12777  }
    12778 
    12779  VkResult result = VK_SUCCESS;
    12780 
    12781  // ======== Main processing.
    12782 
    12783  VkDeviceSize maxBytesToMove = SIZE_MAX;
    12784  uint32_t maxAllocationsToMove = UINT32_MAX;
    12785  if(pDefragmentationInfo != VMA_NULL)
    12786  {
    12787  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
    12788  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
    12789  }
    12790 
    12791  // Process standard memory.
    12792  for(uint32_t memTypeIndex = 0;
    12793  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
    12794  ++memTypeIndex)
    12795  {
    12796  // Only HOST_VISIBLE memory types can be defragmented.
    12797  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    12798  {
    12799  result = m_pBlockVectors[memTypeIndex]->Defragment(
    12800  pDefragmentationStats,
    12801  maxBytesToMove,
    12802  maxAllocationsToMove);
    12803  }
    12804  }
    12805 
    12806  // Process custom pools.
    12807  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
    12808  {
    12809  result = m_Pools[poolIndex]->m_BlockVector.Defragment(
    12810  pDefragmentationStats,
    12811  maxBytesToMove,
    12812  maxAllocationsToMove);
    12813  }
    12814 
    12815  // ======== Destroy defragmentators.
    12816 
    12817  // Process custom pools.
    12818  for(size_t poolIndex = poolCount; poolIndex--; )
    12819  {
    12820  m_Pools[poolIndex]->m_BlockVector.DestroyDefragmentator();
    12821  }
    12822 
    12823  // Process standard memory.
    12824  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
    12825  {
    12826  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    12827  {
    12828  m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
    12829  }
    12830  }
    12831 
    12832  return result;
    12833 }
    12834 
// Fills *pAllocationInfo with the current parameters of hAllocation.
// For allocations that can become lost this also acts as a "touch":
// the allocation's last-use frame index is advanced to the current frame.
void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
{
    if(hAllocation->CanBecomeLost())
    {
        /*
        Warning: This is a carefully designed algorithm.
        Do not modify unless you really know what you're doing :)
        */
        const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        // Lock-free CAS loop: repeats until the allocation is observed either
        // lost or already stamped with the current frame index.
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                // Allocation is lost: report placeholder values.
                // Size and pUserData remain meaningful.
                pAllocationInfo->memoryType = UINT32_MAX;
                pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
                pAllocationInfo->offset = 0;
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                // Touched in the current frame: report real parameters.
                // pMappedData is VMA_NULL because lost-capable allocations
                // cannot be mapped (see Map()).
                pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
                pAllocationInfo->deviceMemory = hAllocation->GetMemory();
                pAllocationInfo->offset = hAllocation->GetOffset();
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else // Last use time earlier than current time.
            {
                // Try to advance the last-use frame index to the current frame;
                // loop again until one of the terminal branches above is taken.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // When statistics strings are enabled, still record the touch so that
        // the last-use frame index is up to date in dumps.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        // Non-lost-capable allocation: parameters are stable, report directly.
        pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
        pAllocationInfo->deviceMemory = hAllocation->GetMemory();
        pAllocationInfo->offset = hAllocation->GetOffset();
        pAllocationInfo->size = hAllocation->GetSize();
        pAllocationInfo->pMappedData = hAllocation->GetMappedData();
        pAllocationInfo->pUserData = hAllocation->GetUserData();
    }
}
    12906 
// Marks hAllocation as used in the current frame.
// Returns false if the allocation is lost, true otherwise.
bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
{
    // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
    if(hAllocation->CanBecomeLost())
    {
        // Lock-free CAS loop, same scheme as in GetAllocationInfo: repeat
        // until the allocation is observed lost or stamped with the current
        // frame index.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                return false;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                return true;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // Keep the last-use frame index current for statistics dumps even for
        // allocations that cannot be lost.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        // Allocations that cannot become lost are always usable.
        return true;
    }
}
    12958 
    12959 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
    12960 {
    12961  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
    12962 
    12963  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
    12964 
    12965  if(newCreateInfo.maxBlockCount == 0)
    12966  {
    12967  newCreateInfo.maxBlockCount = SIZE_MAX;
    12968  }
    12969  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
    12970  {
    12971  return VK_ERROR_INITIALIZATION_FAILED;
    12972  }
    12973 
    12974  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
    12975 
    12976  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
    12977 
    12978  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
    12979  if(res != VK_SUCCESS)
    12980  {
    12981  vma_delete(this, *pPool);
    12982  *pPool = VMA_NULL;
    12983  return res;
    12984  }
    12985 
    12986  // Add to m_Pools.
    12987  {
    12988  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    12989  (*pPool)->SetId(m_NextPoolId++);
    12990  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
    12991  }
    12992 
    12993  return VK_SUCCESS;
    12994 }
    12995 
    12996 void VmaAllocator_T::DestroyPool(VmaPool pool)
    12997 {
    12998  // Remove from m_Pools.
    12999  {
    13000  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    13001  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
    13002  VMA_ASSERT(success && "Pool not found in Allocator.");
    13003  }
    13004 
    13005  vma_delete(this, pool);
    13006 }
    13007 
// Retrieves statistics of a custom pool. Statistics are tracked by the
// pool's block vector; delegate to it.
void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
{
    pool->m_BlockVector.GetPoolStats(pPoolStats);
}
    13012 
// Sets the frame index used by the lost-allocation logic.
// Atomic store: may run concurrently with threads reading the index.
void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
{
    m_CurrentFrameIndex.store(frameIndex);
}
    13017 
    13018 void VmaAllocator_T::MakePoolAllocationsLost(
    13019  VmaPool hPool,
    13020  size_t* pLostAllocationCount)
    13021 {
    13022  hPool->m_BlockVector.MakePoolAllocationsLost(
    13023  m_CurrentFrameIndex.load(),
    13024  pLostAllocationCount);
    13025 }
    13026 
// Runs corruption detection over a custom pool.
// The actual check is implemented by the pool's block vector.
VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
{
    return hPool->m_BlockVector.CheckCorruption();
}
    13031 
    13032 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
    13033 {
    13034  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
    13035 
    13036  // Process default pools.
    13037  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    13038  {
    13039  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
    13040  {
    13041  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    13042  VMA_ASSERT(pBlockVector);
    13043  VkResult localRes = pBlockVector->CheckCorruption();
    13044  switch(localRes)
    13045  {
    13046  case VK_ERROR_FEATURE_NOT_PRESENT:
    13047  break;
    13048  case VK_SUCCESS:
    13049  finalRes = VK_SUCCESS;
    13050  break;
    13051  default:
    13052  return localRes;
    13053  }
    13054  }
    13055  }
    13056 
    13057  // Process custom pools.
    13058  {
    13059  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    13060  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    13061  {
    13062  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
    13063  {
    13064  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
    13065  switch(localRes)
    13066  {
    13067  case VK_ERROR_FEATURE_NOT_PRESENT:
    13068  break;
    13069  case VK_SUCCESS:
    13070  finalRes = VK_SUCCESS;
    13071  break;
    13072  default:
    13073  return localRes;
    13074  }
    13075  }
    13076  }
    13077  }
    13078 
    13079  return finalRes;
    13080 }
    13081 
    13082 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
    13083 {
    13084  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
    13085  (*pAllocation)->InitLost();
    13086 }
    13087 
    13088 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
    13089 {
    13090  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
    13091 
    13092  VkResult res;
    13093  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    13094  {
    13095  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    13096  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
    13097  {
    13098  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    13099  if(res == VK_SUCCESS)
    13100  {
    13101  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
    13102  }
    13103  }
    13104  else
    13105  {
    13106  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
    13107  }
    13108  }
    13109  else
    13110  {
    13111  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    13112  }
    13113 
    13114  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
    13115  {
    13116  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
    13117  }
    13118 
    13119  return res;
    13120 }
    13121 
// Frees raw VkDeviceMemory and returns its size to the per-heap budget,
// if one is configured. Counterpart of AllocateVulkanMemory.
void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
{
    // Notify the user callback before the memory is actually freed,
    // while the handle is still valid.
    if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
    {
        (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
    }

    (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());

    // Give the freed bytes back to the heap budget, under the limit mutex.
    const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
    if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    {
        VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
        m_HeapSizeLimit[heapIndex] += size;
    }
}
    13138 
    13139 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
    13140 {
    13141  if(hAllocation->CanBecomeLost())
    13142  {
    13143  return VK_ERROR_MEMORY_MAP_FAILED;
    13144  }
    13145 
    13146  switch(hAllocation->GetType())
    13147  {
    13148  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13149  {
    13150  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    13151  char *pBytes = VMA_NULL;
    13152  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
    13153  if(res == VK_SUCCESS)
    13154  {
    13155  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
    13156  hAllocation->BlockAllocMap();
    13157  }
    13158  return res;
    13159  }
    13160  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    13161  return hAllocation->DedicatedAllocMap(this, ppData);
    13162  default:
    13163  VMA_ASSERT(0);
    13164  return VK_ERROR_MEMORY_MAP_FAILED;
    13165  }
    13166 }
    13167 
// Unmaps memory previously mapped with Map(). Counterpart decrement of the
// per-allocation / per-block map reference counts.
void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
{
    switch(hAllocation->GetType())
    {
    case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
        {
            // Drop this allocation's map reference first, then release the
            // owning block's mapping.
            VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
            hAllocation->BlockAllocUnmap();
            pBlock->Unmap(this, 1);
        }
        break;
    case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
        hAllocation->DedicatedAllocUnmap(this);
        break;
    default:
        VMA_ASSERT(0);
    }
}
    13186 
    13187 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
    13188 {
    13189  VkResult res = VK_SUCCESS;
    13190  switch(hAllocation->GetType())
    13191  {
    13192  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    13193  res = GetVulkanFunctions().vkBindBufferMemory(
    13194  m_hDevice,
    13195  hBuffer,
    13196  hAllocation->GetMemory(),
    13197  0); //memoryOffset
    13198  break;
    13199  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13200  {
    13201  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    13202  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
    13203  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
    13204  break;
    13205  }
    13206  default:
    13207  VMA_ASSERT(0);
    13208  }
    13209  return res;
    13210 }
    13211 
    13212 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
    13213 {
    13214  VkResult res = VK_SUCCESS;
    13215  switch(hAllocation->GetType())
    13216  {
    13217  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    13218  res = GetVulkanFunctions().vkBindImageMemory(
    13219  m_hDevice,
    13220  hImage,
    13221  hAllocation->GetMemory(),
    13222  0); //memoryOffset
    13223  break;
    13224  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13225  {
    13226  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    13227  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
    13228  res = pBlock->BindImageMemory(this, hAllocation, hImage);
    13229  break;
    13230  }
    13231  default:
    13232  VMA_ASSERT(0);
    13233  }
    13234  return res;
    13235 }
    13236 
// Flushes or invalidates a sub-range of hAllocation's memory, expanding the
// range to meet VkMappedMemoryRange's nonCoherentAtomSize alignment rules.
// No-op for coherent memory types or when size == 0.
// offset/size are relative to the allocation, not to the VkDeviceMemory block.
void VmaAllocator_T::FlushOrInvalidateAllocation(
    VmaAllocation hAllocation,
    VkDeviceSize offset, VkDeviceSize size,
    VMA_CACHE_OPERATION op)
{
    const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
    if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
    {
        const VkDeviceSize allocationSize = hAllocation->GetSize();
        VMA_ASSERT(offset <= allocationSize);

        const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;

        VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
        memRange.memory = hAllocation->GetMemory();

        switch(hAllocation->GetType())
        {
        case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
            // Dedicated allocation: align the range down/up to atom size,
            // clamped to the end of the allocation.
            memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                memRange.size = allocationSize - memRange.offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
                memRange.size = VMA_MIN(
                    VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
                    allocationSize - memRange.offset);
            }
            break;

        case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
        {
            // 1. Still within this allocation.
            // Align within allocation-relative coordinates first.
            memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                size = allocationSize - offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
            }
            memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);

            // 2. Adjust to whole block.
            // Translate to block coordinates and clamp to the block's size.
            const VkDeviceSize allocationOffset = hAllocation->GetOffset();
            VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
            const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
            memRange.offset += allocationOffset;
            memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);

            break;
        }

        default:
            VMA_ASSERT(0);
        }

        switch(op)
        {
        case VMA_CACHE_FLUSH:
            (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        case VMA_CACHE_INVALIDATE:
            (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        default:
            VMA_ASSERT(0);
        }
    }
    // else: Just ignore this call.
}
    13312 
// Unregisters a dedicated allocation and frees its VkDeviceMemory.
// The allocation must be of type ALLOCATION_TYPE_DEDICATED.
void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
{
    VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);

    // Remove from the per-memory-type registry of dedicated allocations,
    // under its dedicated mutex.
    const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    {
        VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocations);
        bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
        VMA_ASSERT(success);
    }

    VkDeviceMemory hMemory = allocation->GetMemory();

    /*
    There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
    before vkFreeMemory.

    if(allocation->GetMappedData() != VMA_NULL)
    {
        (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    }
    */

    FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);

    VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
}
    13342 
    13343 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
    13344 {
    13345  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
    13346  !hAllocation->CanBecomeLost() &&
    13347  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    13348  {
    13349  void* pData = VMA_NULL;
    13350  VkResult res = Map(hAllocation, &pData);
    13351  if(res == VK_SUCCESS)
    13352  {
    13353  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
    13354  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
    13355  Unmap(hAllocation);
    13356  }
    13357  else
    13358  {
    13359  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
    13360  }
    13361  }
    13362 }
    13363 
    13364 #if VMA_STATS_STRING_ENABLED
    13365 
// Writes the detailed memory map (dedicated allocations, default pools,
// custom pools) into the JSON statistics string. Sections are emitted only
// when non-empty. Caller is expected to have an open JSON object.
void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
{
    // "DedicatedAllocations": { "Type N": [ {...}, ... ], ... }
    // The object is opened lazily, on the first non-empty memory type.
    bool dedicatedAllocationsStarted = false;
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocVector);
        if(pDedicatedAllocVector->empty() == false)
        {
            if(dedicatedAllocationsStarted == false)
            {
                dedicatedAllocationsStarted = true;
                json.WriteString("DedicatedAllocations");
                json.BeginObject();
            }

            json.BeginString("Type ");
            json.ContinueString(memTypeIndex);
            json.EndString();

            json.BeginArray();

            for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
            {
                json.BeginObject(true);
                const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
                hAlloc->PrintParameters(json);
                json.EndObject();
            }

            json.EndArray();
        }
    }
    if(dedicatedAllocationsStarted)
    {
        json.EndObject();
    }

    // "DefaultPools": { "Type N": {...}, ... } - also opened lazily.
    {
        bool allocationsStarted = false;
        for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
        {
            if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
            {
                if(allocationsStarted == false)
                {
                    allocationsStarted = true;
                    json.WriteString("DefaultPools");
                    json.BeginObject();
                }

                json.BeginString("Type ");
                json.ContinueString(memTypeIndex);
                json.EndString();

                m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
            }
        }
        if(allocationsStarted)
        {
            json.EndObject();
        }
    }

    // Custom pools
    // "Pools": { "<pool id>": {...}, ... } - keyed by pool id, under the
    // pools mutex.
    {
        VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
        const size_t poolCount = m_Pools.size();
        if(poolCount > 0)
        {
            json.WriteString("Pools");
            json.BeginObject();
            for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
            {
                json.BeginString();
                json.ContinueString(m_Pools[poolIndex]->GetId());
                json.EndString();

                m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
            }
            json.EndObject();
        }
    }
}
    13451 
    13452 #endif // #if VMA_STATS_STRING_ENABLED
    13453 
    13455 // Public interface
    13456 
    13457 VkResult vmaCreateAllocator(
    13458  const VmaAllocatorCreateInfo* pCreateInfo,
    13459  VmaAllocator* pAllocator)
    13460 {
    13461  VMA_ASSERT(pCreateInfo && pAllocator);
    13462  VMA_DEBUG_LOG("vmaCreateAllocator");
    13463  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
    13464  return (*pAllocator)->Init(pCreateInfo);
    13465 }
    13466 
    13467 void vmaDestroyAllocator(
    13468  VmaAllocator allocator)
    13469 {
    13470  if(allocator != VK_NULL_HANDLE)
    13471  {
    13472  VMA_DEBUG_LOG("vmaDestroyAllocator");
    13473  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
    13474  vma_delete(&allocationCallbacks, allocator);
    13475  }
    13476 }
    13477 
    13479  VmaAllocator allocator,
    13480  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
    13481 {
    13482  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
    13483  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
    13484 }
    13485 
    13487  VmaAllocator allocator,
    13488  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
    13489 {
    13490  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
    13491  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
    13492 }
    13493 
    13495  VmaAllocator allocator,
    13496  uint32_t memoryTypeIndex,
    13497  VkMemoryPropertyFlags* pFlags)
    13498 {
    13499  VMA_ASSERT(allocator && pFlags);
    13500  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
    13501  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
    13502 }
    13503 
    13505  VmaAllocator allocator,
    13506  uint32_t frameIndex)
    13507 {
    13508  VMA_ASSERT(allocator);
    13509  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
    13510 
    13511  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13512 
    13513  allocator->SetCurrentFrameIndex(frameIndex);
    13514 }
    13515 
// Computes aggregate statistics over all memory of the allocator.
// All the work happens inside the allocator object.
void vmaCalculateStats(
    VmaAllocator allocator,
    VmaStats* pStats)
{
    VMA_ASSERT(allocator && pStats);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK
    allocator->CalculateStats(pStats);
}
    13524 
    13525 #if VMA_STATS_STRING_ENABLED
    13526 
// Builds a JSON string describing the allocator's current state: totals,
// per-heap and per-type statistics, and (optionally) the detailed memory map.
// The returned string is allocated via the allocator's callbacks and must be
// released with vmaFreeStatsString.
void vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap)
{
    VMA_ASSERT(allocator && ppStatsString);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VmaStringBuilder sb(allocator);
    {
        // Scope ensures the JSON writer finishes before the string is copied out.
        VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
        json.BeginObject();

        VmaStats stats;
        allocator->CalculateStats(&stats);

        // "Total": aggregate over all heaps and types.
        json.WriteString("Total");
        VmaPrintStatInfo(json, stats.total);

        // One "Heap N" object per memory heap.
        for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
        {
            json.BeginString("Heap ");
            json.ContinueString(heapIndex);
            json.EndString();
            json.BeginObject();

            json.WriteString("Size");
            json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);

            json.WriteString("Flags");
            json.BeginArray(true);
            if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
            {
                json.WriteString("DEVICE_LOCAL");
            }
            json.EndArray();

            // Per-heap stats only when the heap actually has blocks.
            if(stats.memoryHeap[heapIndex].blockCount > 0)
            {
                json.WriteString("Stats");
                VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
            }

            // Nested "Type N" objects for the memory types in this heap.
            for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
            {
                if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
                {
                    json.BeginString("Type ");
                    json.ContinueString(typeIndex);
                    json.EndString();

                    json.BeginObject();

                    // Decode memory property flags into readable names.
                    json.WriteString("Flags");
                    json.BeginArray(true);
                    VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
                    if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
                    {
                        json.WriteString("DEVICE_LOCAL");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
                    {
                        json.WriteString("HOST_VISIBLE");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
                    {
                        json.WriteString("HOST_COHERENT");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
                    {
                        json.WriteString("HOST_CACHED");
                    }
                    if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
                    {
                        json.WriteString("LAZILY_ALLOCATED");
                    }
                    json.EndArray();

                    if(stats.memoryType[typeIndex].blockCount > 0)
                    {
                        json.WriteString("Stats");
                        VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
                    }

                    json.EndObject();
                }
            }

            json.EndObject();
        }
        // Optional dump of every block and allocation.
        if(detailedMap == VK_TRUE)
        {
            allocator->PrintDetailedMap(json);
        }

        json.EndObject();
    }

    // Copy the built text into a NUL-terminated buffer owned by the caller.
    const size_t len = sb.GetLength();
    char* const pChars = vma_new_array(allocator, char, len + 1);
    if(len > 0)
    {
        memcpy(pChars, sb.GetData(), len);
    }
    pChars[len] = '\0';
    *ppStatsString = pChars;
}
    13634 
    13635 void vmaFreeStatsString(
    13636  VmaAllocator allocator,
    13637  char* pStatsString)
    13638 {
    13639  if(pStatsString != VMA_NULL)
    13640  {
    13641  VMA_ASSERT(allocator);
    13642  size_t len = strlen(pStatsString);
    13643  vma_delete_array(allocator, pStatsString, len + 1);
    13644  }
    13645 }
    13646 
    13647 #endif // #if VMA_STATS_STRING_ENABLED
    13648 
    13649 /*
    13650 This function is not protected by any mutex because it just reads immutable data.
    13651 */
    13652 VkResult vmaFindMemoryTypeIndex(
    13653  VmaAllocator allocator,
    13654  uint32_t memoryTypeBits,
    13655  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    13656  uint32_t* pMemoryTypeIndex)
    13657 {
    13658  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    13659  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    13660  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    13661 
    13662  if(pAllocationCreateInfo->memoryTypeBits != 0)
    13663  {
    13664  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
    13665  }
    13666 
    13667  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
    13668  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
    13669 
    13670  const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    13671  if(mapped)
    13672  {
    13673  preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    13674  }
    13675 
    13676  // Convert usage to requiredFlags and preferredFlags.
    13677  switch(pAllocationCreateInfo->usage)
    13678  {
    13680  break;
    13682  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    13683  {
    13684  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    13685  }
    13686  break;
    13688  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    13689  break;
    13691  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    13692  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    13693  {
    13694  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    13695  }
    13696  break;
    13698  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    13699  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
    13700  break;
    13701  default:
    13702  break;
    13703  }
    13704 
    13705  *pMemoryTypeIndex = UINT32_MAX;
    13706  uint32_t minCost = UINT32_MAX;
    13707  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
    13708  memTypeIndex < allocator->GetMemoryTypeCount();
    13709  ++memTypeIndex, memTypeBit <<= 1)
    13710  {
    13711  // This memory type is acceptable according to memoryTypeBits bitmask.
    13712  if((memTypeBit & memoryTypeBits) != 0)
    13713  {
    13714  const VkMemoryPropertyFlags currFlags =
    13715  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
    13716  // This memory type contains requiredFlags.
    13717  if((requiredFlags & ~currFlags) == 0)
    13718  {
    13719  // Calculate cost as number of bits from preferredFlags not present in this memory type.
    13720  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
    13721  // Remember memory type with lowest cost.
    13722  if(currCost < minCost)
    13723  {
    13724  *pMemoryTypeIndex = memTypeIndex;
    13725  if(currCost == 0)
    13726  {
    13727  return VK_SUCCESS;
    13728  }
    13729  minCost = currCost;
    13730  }
    13731  }
    13732  }
    13733  }
    13734  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
    13735 }
    13736 
    13738  VmaAllocator allocator,
    13739  const VkBufferCreateInfo* pBufferCreateInfo,
    13740  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    13741  uint32_t* pMemoryTypeIndex)
    13742 {
    13743  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    13744  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
    13745  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    13746  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    13747 
    13748  const VkDevice hDev = allocator->m_hDevice;
    13749  VkBuffer hBuffer = VK_NULL_HANDLE;
    13750  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
    13751  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
    13752  if(res == VK_SUCCESS)
    13753  {
    13754  VkMemoryRequirements memReq = {};
    13755  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
    13756  hDev, hBuffer, &memReq);
    13757 
    13758  res = vmaFindMemoryTypeIndex(
    13759  allocator,
    13760  memReq.memoryTypeBits,
    13761  pAllocationCreateInfo,
    13762  pMemoryTypeIndex);
    13763 
    13764  allocator->GetVulkanFunctions().vkDestroyBuffer(
    13765  hDev, hBuffer, allocator->GetAllocationCallbacks());
    13766  }
    13767  return res;
    13768 }
    13769 
    13771  VmaAllocator allocator,
    13772  const VkImageCreateInfo* pImageCreateInfo,
    13773  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    13774  uint32_t* pMemoryTypeIndex)
    13775 {
    13776  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    13777  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
    13778  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    13779  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    13780 
    13781  const VkDevice hDev = allocator->m_hDevice;
    13782  VkImage hImage = VK_NULL_HANDLE;
    13783  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
    13784  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
    13785  if(res == VK_SUCCESS)
    13786  {
    13787  VkMemoryRequirements memReq = {};
    13788  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
    13789  hDev, hImage, &memReq);
    13790 
    13791  res = vmaFindMemoryTypeIndex(
    13792  allocator,
    13793  memReq.memoryTypeBits,
    13794  pAllocationCreateInfo,
    13795  pMemoryTypeIndex);
    13796 
    13797  allocator->GetVulkanFunctions().vkDestroyImage(
    13798  hDev, hImage, allocator->GetAllocationCallbacks());
    13799  }
    13800  return res;
    13801 }
    13802 
    13803 VkResult vmaCreatePool(
    13804  VmaAllocator allocator,
    13805  const VmaPoolCreateInfo* pCreateInfo,
    13806  VmaPool* pPool)
    13807 {
    13808  VMA_ASSERT(allocator && pCreateInfo && pPool);
    13809 
    13810  VMA_DEBUG_LOG("vmaCreatePool");
    13811 
    13812  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13813 
    13814  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
    13815 
    13816 #if VMA_RECORDING_ENABLED
    13817  if(allocator->GetRecorder() != VMA_NULL)
    13818  {
    13819  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
    13820  }
    13821 #endif
    13822 
    13823  return res;
    13824 }
    13825 
    13826 void vmaDestroyPool(
    13827  VmaAllocator allocator,
    13828  VmaPool pool)
    13829 {
    13830  VMA_ASSERT(allocator);
    13831 
    13832  if(pool == VK_NULL_HANDLE)
    13833  {
    13834  return;
    13835  }
    13836 
    13837  VMA_DEBUG_LOG("vmaDestroyPool");
    13838 
    13839  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13840 
    13841 #if VMA_RECORDING_ENABLED
    13842  if(allocator->GetRecorder() != VMA_NULL)
    13843  {
    13844  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
    13845  }
    13846 #endif
    13847 
    13848  allocator->DestroyPool(pool);
    13849 }
    13850 
    13851 void vmaGetPoolStats(
    13852  VmaAllocator allocator,
    13853  VmaPool pool,
    13854  VmaPoolStats* pPoolStats)
    13855 {
    13856  VMA_ASSERT(allocator && pool && pPoolStats);
    13857 
    13858  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13859 
    13860  allocator->GetPoolStats(pool, pPoolStats);
    13861 }
    13862 
    13864  VmaAllocator allocator,
    13865  VmaPool pool,
    13866  size_t* pLostAllocationCount)
    13867 {
    13868  VMA_ASSERT(allocator && pool);
    13869 
    13870  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13871 
    13872 #if VMA_RECORDING_ENABLED
    13873  if(allocator->GetRecorder() != VMA_NULL)
    13874  {
    13875  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
    13876  }
    13877 #endif
    13878 
    13879  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
    13880 }
    13881 
    13882 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
    13883 {
    13884  VMA_ASSERT(allocator && pool);
    13885 
    13886  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13887 
    13888  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
    13889 
    13890  return allocator->CheckPoolCorruption(pool);
    13891 }
    13892 
    13893 VkResult vmaAllocateMemory(
    13894  VmaAllocator allocator,
    13895  const VkMemoryRequirements* pVkMemoryRequirements,
    13896  const VmaAllocationCreateInfo* pCreateInfo,
    13897  VmaAllocation* pAllocation,
    13898  VmaAllocationInfo* pAllocationInfo)
    13899 {
    13900  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
    13901 
    13902  VMA_DEBUG_LOG("vmaAllocateMemory");
    13903 
    13904  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13905 
    13906  VkResult result = allocator->AllocateMemory(
    13907  *pVkMemoryRequirements,
    13908  false, // requiresDedicatedAllocation
    13909  false, // prefersDedicatedAllocation
    13910  VK_NULL_HANDLE, // dedicatedBuffer
    13911  VK_NULL_HANDLE, // dedicatedImage
    13912  *pCreateInfo,
    13913  VMA_SUBALLOCATION_TYPE_UNKNOWN,
    13914  1, // allocationCount
    13915  pAllocation);
    13916 
    13917 #if VMA_RECORDING_ENABLED
    13918  if(allocator->GetRecorder() != VMA_NULL)
    13919  {
    13920  allocator->GetRecorder()->RecordAllocateMemory(
    13921  allocator->GetCurrentFrameIndex(),
    13922  *pVkMemoryRequirements,
    13923  *pCreateInfo,
    13924  *pAllocation);
    13925  }
    13926 #endif
    13927 
    13928  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
    13929  {
    13930  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    13931  }
    13932 
    13933  return result;
    13934 }
    13935 
    13936 VkResult vmaAllocateMemoryPages(
    13937  VmaAllocator allocator,
    13938  const VkMemoryRequirements* pVkMemoryRequirements,
    13939  const VmaAllocationCreateInfo* pCreateInfo,
    13940  size_t allocationCount,
    13941  VmaAllocation* pAllocations,
    13942  VmaAllocationInfo* pAllocationInfo)
    13943 {
    13944  if(allocationCount == 0)
    13945  {
    13946  return VK_SUCCESS;
    13947  }
    13948 
    13949  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);
    13950 
    13951  VMA_DEBUG_LOG("vmaAllocateMemoryPages");
    13952 
    13953  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13954 
    13955  VkResult result = allocator->AllocateMemory(
    13956  *pVkMemoryRequirements,
    13957  false, // requiresDedicatedAllocation
    13958  false, // prefersDedicatedAllocation
    13959  VK_NULL_HANDLE, // dedicatedBuffer
    13960  VK_NULL_HANDLE, // dedicatedImage
    13961  *pCreateInfo,
    13962  VMA_SUBALLOCATION_TYPE_UNKNOWN,
    13963  allocationCount,
    13964  pAllocations);
    13965 
    13966 #if VMA_RECORDING_ENABLED
    13967  if(allocator->GetRecorder() != VMA_NULL)
    13968  {
    13969  // TODO: Extend recording format with this function.
    13970  /*
    13971  allocator->GetRecorder()->RecordAllocateMemoryPages(
    13972  allocator->GetCurrentFrameIndex(),
    13973  *pVkMemoryRequirements,
    13974  *pCreateInfo,
    13975  *pAllocation);
    13976  */
    13977  }
    13978 #endif
    13979 
    13980  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
    13981  {
    13982  for(size_t i = 0; i < allocationCount; ++i)
    13983  {
    13984  allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
    13985  }
    13986  }
    13987 
    13988  return result;
    13989 }
    13990 
    13992  VmaAllocator allocator,
    13993  VkBuffer buffer,
    13994  const VmaAllocationCreateInfo* pCreateInfo,
    13995  VmaAllocation* pAllocation,
    13996  VmaAllocationInfo* pAllocationInfo)
    13997 {
    13998  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    13999 
    14000  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
    14001 
    14002  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14003 
    14004  VkMemoryRequirements vkMemReq = {};
    14005  bool requiresDedicatedAllocation = false;
    14006  bool prefersDedicatedAllocation = false;
    14007  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
    14008  requiresDedicatedAllocation,
    14009  prefersDedicatedAllocation);
    14010 
    14011  VkResult result = allocator->AllocateMemory(
    14012  vkMemReq,
    14013  requiresDedicatedAllocation,
    14014  prefersDedicatedAllocation,
    14015  buffer, // dedicatedBuffer
    14016  VK_NULL_HANDLE, // dedicatedImage
    14017  *pCreateInfo,
    14018  VMA_SUBALLOCATION_TYPE_BUFFER,
    14019  1, // allocationCount
    14020  pAllocation);
    14021 
    14022 #if VMA_RECORDING_ENABLED
    14023  if(allocator->GetRecorder() != VMA_NULL)
    14024  {
    14025  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
    14026  allocator->GetCurrentFrameIndex(),
    14027  vkMemReq,
    14028  requiresDedicatedAllocation,
    14029  prefersDedicatedAllocation,
    14030  *pCreateInfo,
    14031  *pAllocation);
    14032  }
    14033 #endif
    14034 
    14035  if(pAllocationInfo && result == VK_SUCCESS)
    14036  {
    14037  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    14038  }
    14039 
    14040  return result;
    14041 }
    14042 
    14043 VkResult vmaAllocateMemoryForImage(
    14044  VmaAllocator allocator,
    14045  VkImage image,
    14046  const VmaAllocationCreateInfo* pCreateInfo,
    14047  VmaAllocation* pAllocation,
    14048  VmaAllocationInfo* pAllocationInfo)
    14049 {
    14050  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    14051 
    14052  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
    14053 
    14054  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14055 
    14056  VkMemoryRequirements vkMemReq = {};
    14057  bool requiresDedicatedAllocation = false;
    14058  bool prefersDedicatedAllocation = false;
    14059  allocator->GetImageMemoryRequirements(image, vkMemReq,
    14060  requiresDedicatedAllocation, prefersDedicatedAllocation);
    14061 
    14062  VkResult result = allocator->AllocateMemory(
    14063  vkMemReq,
    14064  requiresDedicatedAllocation,
    14065  prefersDedicatedAllocation,
    14066  VK_NULL_HANDLE, // dedicatedBuffer
    14067  image, // dedicatedImage
    14068  *pCreateInfo,
    14069  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
    14070  1, // allocationCount
    14071  pAllocation);
    14072 
    14073 #if VMA_RECORDING_ENABLED
    14074  if(allocator->GetRecorder() != VMA_NULL)
    14075  {
    14076  allocator->GetRecorder()->RecordAllocateMemoryForImage(
    14077  allocator->GetCurrentFrameIndex(),
    14078  vkMemReq,
    14079  requiresDedicatedAllocation,
    14080  prefersDedicatedAllocation,
    14081  *pCreateInfo,
    14082  *pAllocation);
    14083  }
    14084 #endif
    14085 
    14086  if(pAllocationInfo && result == VK_SUCCESS)
    14087  {
    14088  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    14089  }
    14090 
    14091  return result;
    14092 }
    14093 
    14094 void vmaFreeMemory(
    14095  VmaAllocator allocator,
    14096  VmaAllocation allocation)
    14097 {
    14098  VMA_ASSERT(allocator);
    14099 
    14100  if(allocation == VK_NULL_HANDLE)
    14101  {
    14102  return;
    14103  }
    14104 
    14105  VMA_DEBUG_LOG("vmaFreeMemory");
    14106 
    14107  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14108 
    14109 #if VMA_RECORDING_ENABLED
    14110  if(allocator->GetRecorder() != VMA_NULL)
    14111  {
    14112  allocator->GetRecorder()->RecordFreeMemory(
    14113  allocator->GetCurrentFrameIndex(),
    14114  allocation);
    14115  }
    14116 #endif
    14117 
    14118  allocator->FreeMemory(
    14119  1, // allocationCount
    14120  &allocation);
    14121 }
    14122 
    14123 void vmaFreeMemoryPages(
    14124  VmaAllocator allocator,
    14125  size_t allocationCount,
    14126  VmaAllocation* pAllocations)
    14127 {
    14128  if(allocationCount == 0)
    14129  {
    14130  return;
    14131  }
    14132 
    14133  VMA_ASSERT(allocator);
    14134 
    14135  VMA_DEBUG_LOG("vmaFreeMemoryPages");
    14136 
    14137  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14138 
    14139 #if VMA_RECORDING_ENABLED
    14140  // TODO Add this to recording file format.
    14141  /*
    14142  if(allocator->GetRecorder() != VMA_NULL)
    14143  {
    14144  allocator->GetRecorder()->RecordFreeMemoryPages(
    14145  allocator->GetCurrentFrameIndex(),
    14146  allocation);
    14147  }
    14148  */
    14149 #endif
    14150 
    14151  allocator->FreeMemory(allocationCount, pAllocations);
    14152 }
    14153 
    14155  VmaAllocator allocator,
    14156  VmaAllocation allocation,
    14157  VmaAllocationInfo* pAllocationInfo)
    14158 {
    14159  VMA_ASSERT(allocator && allocation && pAllocationInfo);
    14160 
    14161  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14162 
    14163 #if VMA_RECORDING_ENABLED
    14164  if(allocator->GetRecorder() != VMA_NULL)
    14165  {
    14166  allocator->GetRecorder()->RecordGetAllocationInfo(
    14167  allocator->GetCurrentFrameIndex(),
    14168  allocation);
    14169  }
    14170 #endif
    14171 
    14172  allocator->GetAllocationInfo(allocation, pAllocationInfo);
    14173 }
    14174 
    14175 VkBool32 vmaTouchAllocation(
    14176  VmaAllocator allocator,
    14177  VmaAllocation allocation)
    14178 {
    14179  VMA_ASSERT(allocator && allocation);
    14180 
    14181  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14182 
    14183 #if VMA_RECORDING_ENABLED
    14184  if(allocator->GetRecorder() != VMA_NULL)
    14185  {
    14186  allocator->GetRecorder()->RecordTouchAllocation(
    14187  allocator->GetCurrentFrameIndex(),
    14188  allocation);
    14189  }
    14190 #endif
    14191 
    14192  return allocator->TouchAllocation(allocation);
    14193 }
    14194 
    14196  VmaAllocator allocator,
    14197  VmaAllocation allocation,
    14198  void* pUserData)
    14199 {
    14200  VMA_ASSERT(allocator && allocation);
    14201 
    14202  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14203 
    14204  allocation->SetUserData(allocator, pUserData);
    14205 
    14206 #if VMA_RECORDING_ENABLED
    14207  if(allocator->GetRecorder() != VMA_NULL)
    14208  {
    14209  allocator->GetRecorder()->RecordSetAllocationUserData(
    14210  allocator->GetCurrentFrameIndex(),
    14211  allocation,
    14212  pUserData);
    14213  }
    14214 #endif
    14215 }
    14216 
    14218  VmaAllocator allocator,
    14219  VmaAllocation* pAllocation)
    14220 {
    14221  VMA_ASSERT(allocator && pAllocation);
    14222 
    14223  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
    14224 
    14225  allocator->CreateLostAllocation(pAllocation);
    14226 
    14227 #if VMA_RECORDING_ENABLED
    14228  if(allocator->GetRecorder() != VMA_NULL)
    14229  {
    14230  allocator->GetRecorder()->RecordCreateLostAllocation(
    14231  allocator->GetCurrentFrameIndex(),
    14232  *pAllocation);
    14233  }
    14234 #endif
    14235 }
    14236 
    14237 VkResult vmaMapMemory(
    14238  VmaAllocator allocator,
    14239  VmaAllocation allocation,
    14240  void** ppData)
    14241 {
    14242  VMA_ASSERT(allocator && allocation && ppData);
    14243 
    14244  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14245 
    14246  VkResult res = allocator->Map(allocation, ppData);
    14247 
    14248 #if VMA_RECORDING_ENABLED
    14249  if(allocator->GetRecorder() != VMA_NULL)
    14250  {
    14251  allocator->GetRecorder()->RecordMapMemory(
    14252  allocator->GetCurrentFrameIndex(),
    14253  allocation);
    14254  }
    14255 #endif
    14256 
    14257  return res;
    14258 }
    14259 
    14260 void vmaUnmapMemory(
    14261  VmaAllocator allocator,
    14262  VmaAllocation allocation)
    14263 {
    14264  VMA_ASSERT(allocator && allocation);
    14265 
    14266  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14267 
    14268 #if VMA_RECORDING_ENABLED
    14269  if(allocator->GetRecorder() != VMA_NULL)
    14270  {
    14271  allocator->GetRecorder()->RecordUnmapMemory(
    14272  allocator->GetCurrentFrameIndex(),
    14273  allocation);
    14274  }
    14275 #endif
    14276 
    14277  allocator->Unmap(allocation);
    14278 }
    14279 
    14280 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    14281 {
    14282  VMA_ASSERT(allocator && allocation);
    14283 
    14284  VMA_DEBUG_LOG("vmaFlushAllocation");
    14285 
    14286  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14287 
    14288  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
    14289 
    14290 #if VMA_RECORDING_ENABLED
    14291  if(allocator->GetRecorder() != VMA_NULL)
    14292  {
    14293  allocator->GetRecorder()->RecordFlushAllocation(
    14294  allocator->GetCurrentFrameIndex(),
    14295  allocation, offset, size);
    14296  }
    14297 #endif
    14298 }
    14299 
    14300 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    14301 {
    14302  VMA_ASSERT(allocator && allocation);
    14303 
    14304  VMA_DEBUG_LOG("vmaInvalidateAllocation");
    14305 
    14306  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14307 
    14308  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
    14309 
    14310 #if VMA_RECORDING_ENABLED
    14311  if(allocator->GetRecorder() != VMA_NULL)
    14312  {
    14313  allocator->GetRecorder()->RecordInvalidateAllocation(
    14314  allocator->GetCurrentFrameIndex(),
    14315  allocation, offset, size);
    14316  }
    14317 #endif
    14318 }
    14319 
    14320 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
    14321 {
    14322  VMA_ASSERT(allocator);
    14323 
    14324  VMA_DEBUG_LOG("vmaCheckCorruption");
    14325 
    14326  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14327 
    14328  return allocator->CheckCorruption(memoryTypeBits);
    14329 }
    14330 
    14331 VkResult vmaDefragment(
    14332  VmaAllocator allocator,
    14333  VmaAllocation* pAllocations,
    14334  size_t allocationCount,
    14335  VkBool32* pAllocationsChanged,
    14336  const VmaDefragmentationInfo *pDefragmentationInfo,
    14337  VmaDefragmentationStats* pDefragmentationStats)
    14338 {
    14339  VMA_ASSERT(allocator && pAllocations);
    14340 
    14341  VMA_DEBUG_LOG("vmaDefragment");
    14342 
    14343  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14344 
    14345  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
    14346 }
    14347 
    14348 VkResult vmaBindBufferMemory(
    14349  VmaAllocator allocator,
    14350  VmaAllocation allocation,
    14351  VkBuffer buffer)
    14352 {
    14353  VMA_ASSERT(allocator && allocation && buffer);
    14354 
    14355  VMA_DEBUG_LOG("vmaBindBufferMemory");
    14356 
    14357  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14358 
    14359  return allocator->BindBufferMemory(allocation, buffer);
    14360 }
    14361 
    14362 VkResult vmaBindImageMemory(
    14363  VmaAllocator allocator,
    14364  VmaAllocation allocation,
    14365  VkImage image)
    14366 {
    14367  VMA_ASSERT(allocator && allocation && image);
    14368 
    14369  VMA_DEBUG_LOG("vmaBindImageMemory");
    14370 
    14371  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14372 
    14373  return allocator->BindImageMemory(allocation, image);
    14374 }
    14375 
    14376 VkResult vmaCreateBuffer(
    14377  VmaAllocator allocator,
    14378  const VkBufferCreateInfo* pBufferCreateInfo,
    14379  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    14380  VkBuffer* pBuffer,
    14381  VmaAllocation* pAllocation,
    14382  VmaAllocationInfo* pAllocationInfo)
    14383 {
    14384  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
    14385 
    14386  VMA_DEBUG_LOG("vmaCreateBuffer");
    14387 
    14388  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14389 
    14390  *pBuffer = VK_NULL_HANDLE;
    14391  *pAllocation = VK_NULL_HANDLE;
    14392 
    14393  // 1. Create VkBuffer.
    14394  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
    14395  allocator->m_hDevice,
    14396  pBufferCreateInfo,
    14397  allocator->GetAllocationCallbacks(),
    14398  pBuffer);
    14399  if(res >= 0)
    14400  {
    14401  // 2. vkGetBufferMemoryRequirements.
    14402  VkMemoryRequirements vkMemReq = {};
    14403  bool requiresDedicatedAllocation = false;
    14404  bool prefersDedicatedAllocation = false;
    14405  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
    14406  requiresDedicatedAllocation, prefersDedicatedAllocation);
    14407 
    14408  // Make sure alignment requirements for specific buffer usages reported
    14409  // in Physical Device Properties are included in alignment reported by memory requirements.
    14410  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
    14411  {
    14412  VMA_ASSERT(vkMemReq.alignment %
    14413  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
    14414  }
    14415  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
    14416  {
    14417  VMA_ASSERT(vkMemReq.alignment %
    14418  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
    14419  }
    14420  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
    14421  {
    14422  VMA_ASSERT(vkMemReq.alignment %
    14423  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
    14424  }
    14425 
    14426  // 3. Allocate memory using allocator.
    14427  res = allocator->AllocateMemory(
    14428  vkMemReq,
    14429  requiresDedicatedAllocation,
    14430  prefersDedicatedAllocation,
    14431  *pBuffer, // dedicatedBuffer
    14432  VK_NULL_HANDLE, // dedicatedImage
    14433  *pAllocationCreateInfo,
    14434  VMA_SUBALLOCATION_TYPE_BUFFER,
    14435  1, // allocationCount
    14436  pAllocation);
    14437 
    14438 #if VMA_RECORDING_ENABLED
    14439  if(allocator->GetRecorder() != VMA_NULL)
    14440  {
    14441  allocator->GetRecorder()->RecordCreateBuffer(
    14442  allocator->GetCurrentFrameIndex(),
    14443  *pBufferCreateInfo,
    14444  *pAllocationCreateInfo,
    14445  *pAllocation);
    14446  }
    14447 #endif
    14448 
    14449  if(res >= 0)
    14450  {
    14451  // 3. Bind buffer with memory.
    14452  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
    14453  if(res >= 0)
    14454  {
    14455  // All steps succeeded.
    14456  #if VMA_STATS_STRING_ENABLED
    14457  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
    14458  #endif
    14459  if(pAllocationInfo != VMA_NULL)
    14460  {
    14461  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    14462  }
    14463 
    14464  return VK_SUCCESS;
    14465  }
    14466  allocator->FreeMemory(
    14467  1, // allocationCount
    14468  pAllocation);
    14469  *pAllocation = VK_NULL_HANDLE;
    14470  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    14471  *pBuffer = VK_NULL_HANDLE;
    14472  return res;
    14473  }
    14474  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    14475  *pBuffer = VK_NULL_HANDLE;
    14476  return res;
    14477  }
    14478  return res;
    14479 }
    14480 
    14481 void vmaDestroyBuffer(
    14482  VmaAllocator allocator,
    14483  VkBuffer buffer,
    14484  VmaAllocation allocation)
    14485 {
    14486  VMA_ASSERT(allocator);
    14487 
    14488  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    14489  {
    14490  return;
    14491  }
    14492 
    14493  VMA_DEBUG_LOG("vmaDestroyBuffer");
    14494 
    14495  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14496 
    14497 #if VMA_RECORDING_ENABLED
    14498  if(allocator->GetRecorder() != VMA_NULL)
    14499  {
    14500  allocator->GetRecorder()->RecordDestroyBuffer(
    14501  allocator->GetCurrentFrameIndex(),
    14502  allocation);
    14503  }
    14504 #endif
    14505 
    14506  if(buffer != VK_NULL_HANDLE)
    14507  {
    14508  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
    14509  }
    14510 
    14511  if(allocation != VK_NULL_HANDLE)
    14512  {
    14513  allocator->FreeMemory(
    14514  1, // allocationCount
    14515  &allocation);
    14516  }
    14517 }
    14518 
    14519 VkResult vmaCreateImage(
    14520  VmaAllocator allocator,
    14521  const VkImageCreateInfo* pImageCreateInfo,
    14522  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    14523  VkImage* pImage,
    14524  VmaAllocation* pAllocation,
    14525  VmaAllocationInfo* pAllocationInfo)
    14526 {
    14527  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
    14528 
    14529  VMA_DEBUG_LOG("vmaCreateImage");
    14530 
    14531  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14532 
    14533  *pImage = VK_NULL_HANDLE;
    14534  *pAllocation = VK_NULL_HANDLE;
    14535 
    14536  // 1. Create VkImage.
    14537  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
    14538  allocator->m_hDevice,
    14539  pImageCreateInfo,
    14540  allocator->GetAllocationCallbacks(),
    14541  pImage);
    14542  if(res >= 0)
    14543  {
    14544  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
    14545  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
    14546  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
    14547 
    14548  // 2. Allocate memory using allocator.
    14549  VkMemoryRequirements vkMemReq = {};
    14550  bool requiresDedicatedAllocation = false;
    14551  bool prefersDedicatedAllocation = false;
    14552  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
    14553  requiresDedicatedAllocation, prefersDedicatedAllocation);
    14554 
    14555  res = allocator->AllocateMemory(
    14556  vkMemReq,
    14557  requiresDedicatedAllocation,
    14558  prefersDedicatedAllocation,
    14559  VK_NULL_HANDLE, // dedicatedBuffer
    14560  *pImage, // dedicatedImage
    14561  *pAllocationCreateInfo,
    14562  suballocType,
    14563  1, // allocationCount
    14564  pAllocation);
    14565 
    14566 #if VMA_RECORDING_ENABLED
    14567  if(allocator->GetRecorder() != VMA_NULL)
    14568  {
    14569  allocator->GetRecorder()->RecordCreateImage(
    14570  allocator->GetCurrentFrameIndex(),
    14571  *pImageCreateInfo,
    14572  *pAllocationCreateInfo,
    14573  *pAllocation);
    14574  }
    14575 #endif
    14576 
    14577  if(res >= 0)
    14578  {
    14579  // 3. Bind image with memory.
    14580  res = allocator->BindImageMemory(*pAllocation, *pImage);
    14581  if(res >= 0)
    14582  {
    14583  // All steps succeeded.
    14584  #if VMA_STATS_STRING_ENABLED
    14585  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
    14586  #endif
    14587  if(pAllocationInfo != VMA_NULL)
    14588  {
    14589  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    14590  }
    14591 
    14592  return VK_SUCCESS;
    14593  }
    14594  allocator->FreeMemory(
    14595  1, // allocationCount
    14596  pAllocation);
    14597  *pAllocation = VK_NULL_HANDLE;
    14598  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    14599  *pImage = VK_NULL_HANDLE;
    14600  return res;
    14601  }
    14602  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    14603  *pImage = VK_NULL_HANDLE;
    14604  return res;
    14605  }
    14606  return res;
    14607 }
    14608 
    14609 void vmaDestroyImage(
    14610  VmaAllocator allocator,
    14611  VkImage image,
    14612  VmaAllocation allocation)
    14613 {
    14614  VMA_ASSERT(allocator);
    14615 
    14616  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    14617  {
    14618  return;
    14619  }
    14620 
    14621  VMA_DEBUG_LOG("vmaDestroyImage");
    14622 
    14623  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14624 
    14625 #if VMA_RECORDING_ENABLED
    14626  if(allocator->GetRecorder() != VMA_NULL)
    14627  {
    14628  allocator->GetRecorder()->RecordDestroyImage(
    14629  allocator->GetCurrentFrameIndex(),
    14630  allocation);
    14631  }
    14632 #endif
    14633 
    14634  if(image != VK_NULL_HANDLE)
    14635  {
    14636  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
    14637  }
    14638  if(allocation != VK_NULL_HANDLE)
    14639  {
    14640  allocator->FreeMemory(
    14641  1, // allocationCount
    14642  &allocation);
    14643  }
    14644 }
    14645 
    14646 #endif // #ifdef VMA_IMPLEMENTATION
    PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
    Definition: vk_mem_alloc.h:1567
    Set this flag if the allocation should have its own memory block.
    Definition: vk_mem_alloc.h:1868
    void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
    Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
    VkPhysicalDevice physicalDevice
    Vulkan physical device.
    Definition: vk_mem_alloc.h:1624
    @@ -82,7 +82,7 @@ $(function() {
    PFN_vkMapMemory vkMapMemory
    Definition: vk_mem_alloc.h:1571
    VkDeviceMemory deviceMemory
    Handle to Vulkan memory object.
    Definition: vk_mem_alloc.h:2290
    VmaAllocatorCreateFlags flags
    Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
    Definition: vk_mem_alloc.h:1621
    -
    uint32_t maxAllocationsToMove
    Maximum number of allocations that can be moved to different place.
    Definition: vk_mem_alloc.h:2535
    +
    uint32_t maxAllocationsToMove
    Maximum number of allocations that can be moved to different place.
    Definition: vk_mem_alloc.h:2586
Use this flag if you always allocate only buffers and linear images or only optimal images out of this pool.
Definition: vk_mem_alloc.h:2079
    #define VMA_RECORDING_ENABLED
    Definition: vk_mem_alloc.h:1468
void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInfo::frameInUseCount frames back from now.
    @@ -102,17 +102,18 @@ $(function() {
    VkDeviceSize allocationSizeMax
    Definition: vk_mem_alloc.h:1758
    PFN_vkBindImageMemory vkBindImageMemory
    Definition: vk_mem_alloc.h:1576
    VkDeviceSize unusedBytes
    Total number of bytes occupied by unused ranges.
    Definition: vk_mem_alloc.h:1757
    -
    Statistics returned by function vmaDefragment().
    Definition: vk_mem_alloc.h:2539
    +
    Statistics returned by function vmaDefragment().
    Definition: vk_mem_alloc.h:2590
    void vmaFreeMemory(VmaAllocator allocator, VmaAllocation allocation)
    Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
    uint32_t frameInUseCount
    Maximum number of additional frames that are in use at the same time as current frame.
    Definition: vk_mem_alloc.h:1650
    VmaStatInfo total
    Definition: vk_mem_alloc.h:1767
    -
    uint32_t deviceMemoryBlocksFreed
    Number of empty VkDeviceMemory objects that have been released to the system.
    Definition: vk_mem_alloc.h:2547
    +
    uint32_t deviceMemoryBlocksFreed
    Number of empty VkDeviceMemory objects that have been released to the system.
    Definition: vk_mem_alloc.h:2598
    VmaAllocationCreateFlags flags
    Use VmaAllocationCreateFlagBits enum.
    Definition: vk_mem_alloc.h:1962
    -
    VkDeviceSize maxBytesToMove
    Maximum total numbers of bytes that can be copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:2530
    +
    VkDeviceSize maxBytesToMove
    Maximum total numbers of bytes that can be copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:2581
    PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
    Definition: vk_mem_alloc.h:1577
    void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    Callback function called after successful vkAllocateMemory.
    Definition: vk_mem_alloc.h:1502
    Represents main object of this library initialized.
    VkDevice device
    Vulkan device.
    Definition: vk_mem_alloc.h:1627
    +
    void vmaFreeMemoryPages(VmaAllocator allocator, size_t allocationCount, VmaAllocation *pAllocations)
    Frees memory and destroys multiple allocations.
    VkResult vmaBindBufferMemory(VmaAllocator allocator, VmaAllocation allocation, VkBuffer buffer)
    Binds buffer to allocation.
    Describes parameter of created VmaPool.
    Definition: vk_mem_alloc.h:2121
    Definition: vk_mem_alloc.h:2115
    @@ -131,10 +132,10 @@ $(function() {
    VmaMemoryUsage
    Definition: vk_mem_alloc.h:1803
    struct VmaAllocationInfo VmaAllocationInfo
    Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
    void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    Flushes memory of given allocation.
    -
    Optional configuration parameters to be passed to function vmaDefragment().
    Definition: vk_mem_alloc.h:2525
    +
    Optional configuration parameters to be passed to function vmaDefragment().
    Definition: vk_mem_alloc.h:2576
    struct VmaPoolCreateInfo VmaPoolCreateInfo
    Describes parameter of created VmaPool.
    void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
    Destroys VmaPool object and frees Vulkan device memory.
    -
    VkDeviceSize bytesFreed
    Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
    Definition: vk_mem_alloc.h:2543
    +
    VkDeviceSize bytesFreed
    Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
    Definition: vk_mem_alloc.h:2594
    Definition: vk_mem_alloc.h:1842
    uint32_t memoryTypeBits
    Bitmask containing one bit set for every memory type acceptable for this allocation.
    Definition: vk_mem_alloc.h:1986
    PFN_vkBindBufferMemory vkBindBufferMemory
    Definition: vk_mem_alloc.h:1575
    @@ -151,7 +152,7 @@ $(function() {
    struct VmaStatInfo VmaStatInfo
    Calculated statistics of memory usage in entire allocator.
    VkFlags VmaRecordFlags
    Definition: vk_mem_alloc.h:1600
    Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
    Definition: vk_mem_alloc.h:1534
    -
    uint32_t allocationsMoved
    Number of allocations that have been moved to different places.
    Definition: vk_mem_alloc.h:2545
    +
    uint32_t allocationsMoved
    Number of allocations that have been moved to different places.
    Definition: vk_mem_alloc.h:2596
    void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
    Creates new allocation that is in lost state from the beginning.
    VkMemoryPropertyFlags requiredFlags
    Flags that must be set in a Memory Type chosen for an allocation.
    Definition: vk_mem_alloc.h:1973
    VkDeviceSize unusedRangeSizeMax
    Size of the largest continuous free memory region available for new allocation.
    Definition: vk_mem_alloc.h:2187
    @@ -201,6 +202,7 @@ $(function() {
    void * pUserData
    Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
    Definition: vk_mem_alloc.h:2314
    VkDeviceSize preferredLargeHeapBlockSize
    Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB...
    Definition: vk_mem_alloc.h:1630
    VkDeviceSize allocationSizeAvg
    Definition: vk_mem_alloc.h:1758
    +
    VkResult vmaAllocateMemoryPages(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, size_t allocationCount, VmaAllocation *pAllocations, VmaAllocationInfo *pAllocationInfo)
    General purpose memory allocation for multiple allocation objects at once.
    VkDeviceSize usedBytes
    Total number of bytes occupied by all allocations.
    Definition: vk_mem_alloc.h:1755
    struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
    Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
    VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
    Checks magic number in margins around all allocations in given memory types (in both default and cust...
    @@ -210,7 +212,7 @@ $(function() {
    VkDeviceSize offset
    Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
    Definition: vk_mem_alloc.h:2295
    Definition: vk_mem_alloc.h:1943
    Definition: vk_mem_alloc.h:1955
    -
    VkDeviceSize bytesMoved
    Total number of bytes that have been copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:2541
    +
    VkDeviceSize bytesMoved
    Total number of bytes that have been copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:2592
    Pointers to some Vulkan functions - a subset used by the library.
    Definition: vk_mem_alloc.h:1566
    VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
    Creates Allocator object.
    uint32_t unusedRangeCount
    Number of free ranges of memory between allocations.
    Definition: vk_mem_alloc.h:1753
    diff --git a/src/SparseBindingTest.cpp b/src/SparseBindingTest.cpp index 26a1bfb..02be000 100644 --- a/src/SparseBindingTest.cpp +++ b/src/SparseBindingTest.cpp @@ -21,7 +21,7 @@ void SaveAllocatorStatsToFile(const wchar_t* filePath); class BaseImage { public: - virtual VkResult Init(RandomNumberGenerator& rand) = 0; + virtual void Init(RandomNumberGenerator& rand) = 0; virtual ~BaseImage(); protected: @@ -33,7 +33,7 @@ protected: class TraditionalImage : public BaseImage { public: - virtual VkResult Init(RandomNumberGenerator& rand); + virtual void Init(RandomNumberGenerator& rand); virtual ~TraditionalImage(); private: @@ -43,7 +43,7 @@ private: class SparseBindingImage : public BaseImage { public: - virtual VkResult Init(RandomNumberGenerator& rand); + virtual void Init(RandomNumberGenerator& rand); virtual ~SparseBindingImage(); private: @@ -73,7 +73,7 @@ void BaseImage::FillImageCreateInfo(VkImageCreateInfo& outInfo, RandomNumberGene outInfo.extent.height = rand.Generate() % (imageSizeMax - imageSizeMin) + imageSizeMin; outInfo.extent.depth = 1; outInfo.mipLevels = 1; // TODO ? - outInfo.arrayLayers = 1; // TODO ? + outInfo.arrayLayers = 1; outInfo.format = VK_FORMAT_R8G8B8A8_UNORM; outInfo.tiling = VK_IMAGE_TILING_OPTIMAL; outInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; @@ -85,7 +85,7 @@ void BaseImage::FillImageCreateInfo(VkImageCreateInfo& outInfo, RandomNumberGene //////////////////////////////////////////////////////////////////////////////// // class TraditionalImage -VkResult TraditionalImage::Init(RandomNumberGenerator& rand) +void TraditionalImage::Init(RandomNumberGenerator& rand) { VkImageCreateInfo imageCreateInfo; FillImageCreateInfo(imageCreateInfo, rand); @@ -95,10 +95,8 @@ VkResult TraditionalImage::Init(RandomNumberGenerator& rand) // Default BEST_FIT is clearly better. 
//allocCreateInfo.flags |= VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT; - const VkResult res = vmaCreateImage(g_hAllocator, &imageCreateInfo, &allocCreateInfo, - &m_Image, &m_Allocation, nullptr); - - return res; + ERR_GUARD_VULKAN( vmaCreateImage(g_hAllocator, &imageCreateInfo, &allocCreateInfo, + &m_Image, &m_Allocation, nullptr) ); } TraditionalImage::~TraditionalImage() @@ -112,7 +110,7 @@ TraditionalImage::~TraditionalImage() //////////////////////////////////////////////////////////////////////////////// // class SparseBindingImage -VkResult SparseBindingImage::Init(RandomNumberGenerator& rand) +void SparseBindingImage::Init(RandomNumberGenerator& rand) { assert(g_SparseBindingEnabled && g_hSparseBindingQueue); @@ -120,11 +118,7 @@ VkResult SparseBindingImage::Init(RandomNumberGenerator& rand) VkImageCreateInfo imageCreateInfo; FillImageCreateInfo(imageCreateInfo, rand); imageCreateInfo.flags |= VK_IMAGE_CREATE_SPARSE_BINDING_BIT; - VkResult res = vkCreateImage(g_hDevice, &imageCreateInfo, nullptr, &m_Image); - if(res != VK_SUCCESS) - { - return res; - } + ERR_GUARD_VULKAN( vkCreateImage(g_hDevice, &imageCreateInfo, nullptr, &m_Image) ); // Get memory requirements. 
VkMemoryRequirements imageMemReq; @@ -152,20 +146,16 @@ VkResult SparseBindingImage::Init(RandomNumberGenerator& rand) m_Allocations.resize(pageCount); std::fill(m_Allocations.begin(), m_Allocations.end(), nullptr); std::vector binds{pageCount}; - VmaAllocationInfo allocInfo; + std::vector allocInfo{pageCount}; + ERR_GUARD_VULKAN( vmaAllocateMemoryPages(g_hAllocator, &pageMemReq, &allocCreateInfo, pageCount, m_Allocations.data(), allocInfo.data()) ); + for(uint32_t i = 0; i < pageCount; ++i) { - res = vmaAllocateMemory(g_hAllocator, &pageMemReq, &allocCreateInfo, &m_Allocations[i], &allocInfo); - if(res != VK_SUCCESS) - { - return res; - } - binds[i] = {}; binds[i].resourceOffset = pageSize * i; binds[i].size = pageSize; - binds[i].memory = allocInfo.deviceMemory; - binds[i].memoryOffset = allocInfo.offset; + binds[i].memory = allocInfo[i].deviceMemory; + binds[i].memoryOffset = allocInfo[i].offset; } VkSparseImageOpaqueMemoryBindInfo imageBindInfo; @@ -180,16 +170,11 @@ VkResult SparseBindingImage::Init(RandomNumberGenerator& rand) ERR_GUARD_VULKAN( vkResetFences(g_hDevice, 1, &g_ImmediateFence) ); ERR_GUARD_VULKAN( vkQueueBindSparse(g_hSparseBindingQueue, 1, &bindSparseInfo, g_ImmediateFence) ); ERR_GUARD_VULKAN( vkWaitForFences(g_hDevice, 1, &g_ImmediateFence, VK_TRUE, UINT64_MAX) ); - - return VK_SUCCESS; } SparseBindingImage::~SparseBindingImage() { - for(size_t i = m_Allocations.size(); i--; ) - { - vmaFreeMemory(g_hAllocator, m_Allocations[i]); - } + vmaFreeMemoryPages(g_hAllocator, m_Allocations.size(), m_Allocations.data()); } //////////////////////////////////////////////////////////////////////////////// @@ -223,11 +208,9 @@ void TestSparseBinding() ImageInfo imageInfo; //imageInfo.image = std::make_unique(); imageInfo.image = std::make_unique(); - if(imageInfo.image->Init(rand) == VK_SUCCESS) - { - imageInfo.endFrame = g_FrameIndex + rand.Generate() % (imageLifeFramesMax - imageLifeFramesMin) + imageLifeFramesMin; - 
images.push_back(std::move(imageInfo)); - } + imageInfo.image->Init(rand); + imageInfo.endFrame = g_FrameIndex + rand.Generate() % (imageLifeFramesMax - imageLifeFramesMin) + imageLifeFramesMin; + images.push_back(std::move(imageInfo)); // Delete all images that expired. for(size_t i = images.size(); i--; ) diff --git a/src/Tests.cpp b/src/Tests.cpp index 0940573..a5378a5 100644 --- a/src/Tests.cpp +++ b/src/Tests.cpp @@ -18,8 +18,8 @@ enum CONFIG_TYPE { CONFIG_TYPE_COUNT }; -//static constexpr CONFIG_TYPE ConfigType = CONFIG_TYPE_SMALL; -static constexpr CONFIG_TYPE ConfigType = CONFIG_TYPE_LARGE; +static constexpr CONFIG_TYPE ConfigType = CONFIG_TYPE_SMALL; +//static constexpr CONFIG_TYPE ConfigType = CONFIG_TYPE_LARGE; enum class FREE_ORDER { FORWARD, BACKWARD, RANDOM, COUNT }; diff --git a/src/vk_mem_alloc.h b/src/vk_mem_alloc.h index 2c4f5ed..119a63e 100644 --- a/src/vk_mem_alloc.h +++ b/src/vk_mem_alloc.h @@ -2319,7 +2319,7 @@ typedef struct VmaAllocationInfo { @param[out] pAllocation Handle to allocated memory. @param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo(). -You should free the memory using vmaFreeMemory(). +You should free the memory using vmaFreeMemory() or vmaFreeMemoryPages(). It is recommended to use vmaAllocateMemoryForBuffer(), vmaAllocateMemoryForImage(), vmaCreateBuffer(), vmaCreateImage() instead whenever possible. @@ -2331,6 +2331,33 @@ VkResult vmaAllocateMemory( VmaAllocation* pAllocation, VmaAllocationInfo* pAllocationInfo); +/** \brief General purpose memory allocation for multiple allocation objects at once. + +@param allocator Allocator object. +@param pVkMemoryRequirements Memory requirements for each allocation. +@param pCreateInfo Creation parameters for each allocation. +@param allocationCount Number of allocations to make. +@param[out] pAllocations Pointer to array that will be filled with handles to created allocations. 
+@param[out] pAllocationInfo Optional. Pointer to array that will be filled with parameters of created allocations. + +You should free the memory using vmaFreeMemory() or vmaFreeMemoryPages(). + +Word "pages" is just a suggestion to use this function to allocate pieces of memory needed for sparse binding. +It is just a general purpose allocation function able to make multiple allocations at once. +It may be internally optimized to be more efficient than calling vmaAllocateMemory() `allocationCount` times. + +All allocations are made using same parameters. All of them are created out of the same memory pool and type. +If any allocation fails, all allocations already made within this function call are also freed, so that when +returned result is not `VK_SUCCESS`, `pAllocations` array is always entirely filled with `VK_NULL_HANDLE`. +*/ +VkResult vmaAllocateMemoryPages( + VmaAllocator allocator, + const VkMemoryRequirements* pVkMemoryRequirements, + const VmaAllocationCreateInfo* pCreateInfo, + size_t allocationCount, + VmaAllocation* pAllocations, + VmaAllocationInfo* pAllocationInfo); + /** @param[out] pAllocation Handle to allocated memory. @param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo(). @@ -2352,11 +2379,29 @@ VkResult vmaAllocateMemoryForImage( VmaAllocation* pAllocation, VmaAllocationInfo* pAllocationInfo); -/// Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage(). +/** \brief Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage(). + +Passing `VK_NULL_HANDLE` as `allocation` is valid. Such function call is just skipped. +*/ void vmaFreeMemory( VmaAllocator allocator, VmaAllocation allocation); +/** \brief Frees memory and destroys multiple allocations. 
+ +Word "pages" is just a suggestion to use this function to free pieces of memory used for sparse binding. +It is just a general purpose function to free memory and destroy allocations made using e.g. vmaAllocateMemory(), +vmaAllocateMemoryPages() and other functions. +It may be internally optimized to be more efficient than calling vmaFreeMemory() `allocationCount` times. + +Allocations in `pAllocations` array can come from any memory pools and types. +Passing `VK_NULL_HANDLE` as elements of `pAllocations` array is valid. Such entries are just skipped. +*/ +void vmaFreeMemoryPages( + VmaAllocator allocator, + size_t allocationCount, + VmaAllocation* pAllocations); + /** \brief Returns current information about specified allocation and atomically marks it as used in current frame. Current paramters of given allocation are returned in `pAllocationInfo`. @@ -5302,7 +5347,8 @@ public: VkDeviceSize alignment, const VmaAllocationCreateInfo& createInfo, VmaSuballocationType suballocType, - VmaAllocation* pAllocation); + size_t allocationCount, + VmaAllocation* pAllocations); void Free( VmaAllocation hAllocation); @@ -5362,6 +5408,15 @@ private: // after this call. void IncrementallySortBlocks(); + VkResult AllocatePage( + VmaPool hCurrentPool, + uint32_t currentFrameIndex, + VkDeviceSize size, + VkDeviceSize alignment, + const VmaAllocationCreateInfo& createInfo, + VmaSuballocationType suballocType, + VmaAllocation* pAllocation); + // To be used only without CAN_MAKE_OTHER_LOST flag. VkResult AllocateFromBlock( VmaDeviceMemoryBlock* pBlock, @@ -5729,10 +5784,13 @@ public: VkImage dedicatedImage, const VmaAllocationCreateInfo& createInfo, VmaSuballocationType suballocType, - VmaAllocation* pAllocation); + size_t allocationCount, + VmaAllocation* pAllocations); // Main deallocation function. 
- void FreeMemory(const VmaAllocation allocation); + void FreeMemory( + size_t allocationCount, + const VmaAllocation* pAllocations); void CalculateStats(VmaStats* pStats); @@ -5811,9 +5869,21 @@ private: const VmaAllocationCreateInfo& createInfo, uint32_t memTypeIndex, VmaSuballocationType suballocType, + size_t allocationCount, + VmaAllocation* pAllocations); + + // Helper function only to be used inside AllocateDedicatedMemory. + VkResult AllocateDedicatedMemoryPage( + VkDeviceSize size, + VmaSuballocationType suballocType, + uint32_t memTypeIndex, + const VkMemoryAllocateInfo& allocInfo, + bool map, + bool isUserDataString, + void* pUserData, VmaAllocation* pAllocation); - // Allocates and registers new VkDeviceMemory specifically for single allocation. + // Allocates and registers new VkDeviceMemory specifically for dedicated allocations. VkResult AllocateDedicatedMemory( VkDeviceSize size, VmaSuballocationType suballocType, @@ -5823,7 +5893,8 @@ private: void* pUserData, VkBuffer dedicatedBuffer, VkImage dedicatedImage, - VmaAllocation* pAllocation); + size_t allocationCount, + VmaAllocation* pAllocations); // Tries to free pMemory as Dedicated Memory. Returns true if found and freed. 
void FreeDedicatedMemory(VmaAllocation allocation); @@ -10213,6 +10284,51 @@ bool VmaBlockVector::IsCorruptionDetectionEnabled() const static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32; VkResult VmaBlockVector::Allocate( + VmaPool hCurrentPool, + uint32_t currentFrameIndex, + VkDeviceSize size, + VkDeviceSize alignment, + const VmaAllocationCreateInfo& createInfo, + VmaSuballocationType suballocType, + size_t allocationCount, + VmaAllocation* pAllocations) +{ + size_t allocIndex; + VkResult res = VK_SUCCESS; + + { + VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex); + for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex) + { + res = AllocatePage( + hCurrentPool, + currentFrameIndex, + size, + alignment, + createInfo, + suballocType, + pAllocations + allocIndex); + if(res != VK_SUCCESS) + { + break; + } + } + } + + if(res != VK_SUCCESS) + { + // Free all already created allocations. + while(allocIndex--) + { + Free(pAllocations[allocIndex]); + } + memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount); + } + + return res; +} + +VkResult VmaBlockVector::AllocatePage( VmaPool hCurrentPool, uint32_t currentFrameIndex, VkDeviceSize size, @@ -10264,8 +10380,6 @@ VkResult VmaBlockVector::Allocate( return VK_ERROR_OUT_OF_DEVICE_MEMORY; } - VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex); - /* Under certain condition, this whole section can be skipped for optimization, so we move on directly to trying to allocate with canMakeOtherLost. 
That's the case @@ -12035,10 +12149,11 @@ VkResult VmaAllocator_T::AllocateMemoryOfType( const VmaAllocationCreateInfo& createInfo, uint32_t memTypeIndex, VmaSuballocationType suballocType, - VmaAllocation* pAllocation) + size_t allocationCount, + VmaAllocation* pAllocations) { - VMA_ASSERT(pAllocation != VMA_NULL); - VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size); + VMA_ASSERT(pAllocations != VMA_NULL); + VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, vkMemReq.size); VmaAllocationCreateInfo finalCreateInfo = createInfo; @@ -12083,7 +12198,8 @@ VkResult VmaAllocator_T::AllocateMemoryOfType( finalCreateInfo.pUserData, dedicatedBuffer, dedicatedImage, - pAllocation); + allocationCount, + pAllocations); } } else @@ -12095,7 +12211,8 @@ VkResult VmaAllocator_T::AllocateMemoryOfType( alignment, finalCreateInfo, suballocType, - pAllocation); + allocationCount, + pAllocations); if(res == VK_SUCCESS) { return res; } @@ -12117,7 +12234,8 @@ VkResult VmaAllocator_T::AllocateMemoryOfType( finalCreateInfo.pUserData, dedicatedBuffer, dedicatedImage, - pAllocation); + allocationCount, + pAllocations); if(res == VK_SUCCESS) { // Succeeded: AllocateDedicatedMemory function already filled pMemory, nothing more to do here. @@ -12143,9 +12261,10 @@ VkResult VmaAllocator_T::AllocateDedicatedMemory( void* pUserData, VkBuffer dedicatedBuffer, VkImage dedicatedImage, - VmaAllocation* pAllocation) + size_t allocationCount, + VmaAllocation* pAllocations) { - VMA_ASSERT(pAllocation); + VMA_ASSERT(allocationCount > 0 && pAllocations); VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO }; allocInfo.memoryTypeIndex = memTypeIndex; @@ -12169,7 +12288,80 @@ VkResult VmaAllocator_T::AllocateDedicatedMemory( } #endif // #if VMA_DEDICATED_ALLOCATION - // Allocate VkDeviceMemory. 
+ size_t allocIndex; + VkResult res; + for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex) + { + res = AllocateDedicatedMemoryPage( + size, + suballocType, + memTypeIndex, + allocInfo, + map, + isUserDataString, + pUserData, + pAllocations + allocIndex); + if(res != VK_SUCCESS) + { + break; + } + } + + if(res == VK_SUCCESS) + { + // Register them in m_pDedicatedAllocations. + { + VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex); + AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex]; + VMA_ASSERT(pDedicatedAllocations); + for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex) + { + VmaVectorInsertSorted(*pDedicatedAllocations, pAllocations[allocIndex]); + } + } + + VMA_DEBUG_LOG(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex); + } + else + { + // Free all already created allocations. + while(allocIndex--) + { + VmaAllocation currAlloc = pAllocations[allocIndex]; + VkDeviceMemory hMemory = currAlloc->GetMemory(); + + /* + There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory + before vkFreeMemory. 
+ + if(currAlloc->GetMappedData() != VMA_NULL) + { + (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory); + } + */ + + FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory); + + currAlloc->SetUserData(this, VMA_NULL); + vma_delete(this, currAlloc); + } + + memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount); + } + + return res; +} + +VkResult VmaAllocator_T::AllocateDedicatedMemoryPage( + VkDeviceSize size, + VmaSuballocationType suballocType, + uint32_t memTypeIndex, + const VkMemoryAllocateInfo& allocInfo, + bool map, + bool isUserDataString, + void* pUserData, + VmaAllocation* pAllocation) +{ VkDeviceMemory hMemory = VK_NULL_HANDLE; VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory); if(res < 0) @@ -12204,16 +12396,6 @@ VkResult VmaAllocator_T::AllocateDedicatedMemory( FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED); } - // Register it in m_pDedicatedAllocations. - { - VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex); - AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex]; - VMA_ASSERT(pDedicatedAllocations); - VmaVectorInsertSorted(*pDedicatedAllocations, *pAllocation); - } - - VMA_DEBUG_LOG(" Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex); - return VK_SUCCESS; } @@ -12289,8 +12471,11 @@ VkResult VmaAllocator_T::AllocateMemory( VkImage dedicatedImage, const VmaAllocationCreateInfo& createInfo, VmaSuballocationType suballocType, - VmaAllocation* pAllocation) + size_t allocationCount, + VmaAllocation* pAllocations) { + memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount); + VMA_ASSERT(VmaIsPow2(vkMemReq.alignment)); if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 && @@ -12337,7 +12522,8 @@ VkResult VmaAllocator_T::AllocateMemory( alignmentForPool, createInfo, suballocType, - pAllocation); + allocationCount, + pAllocations); } else { @@ -12360,7 +12546,8 @@ VkResult VmaAllocator_T::AllocateMemory( createInfo, 
memTypeIndex, suballocType, - pAllocation); + allocationCount, + pAllocations); // Succeeded on first try. if(res == VK_SUCCESS) { @@ -12390,7 +12577,8 @@ VkResult VmaAllocator_T::AllocateMemory( createInfo, memTypeIndex, suballocType, - pAllocation); + allocationCount, + pAllocations); // Allocation from this alternative memory type succeeded. if(res == VK_SUCCESS) { @@ -12413,45 +12601,55 @@ VkResult VmaAllocator_T::AllocateMemory( } } -void VmaAllocator_T::FreeMemory(const VmaAllocation allocation) +void VmaAllocator_T::FreeMemory( + size_t allocationCount, + const VmaAllocation* pAllocations) { - VMA_ASSERT(allocation); + VMA_ASSERT(pAllocations); - if(TouchAllocation(allocation)) + for(size_t allocIndex = allocationCount; allocIndex--; ) { - if(VMA_DEBUG_INITIALIZE_ALLOCATIONS) - { - FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED); - } + VmaAllocation allocation = pAllocations[allocIndex]; - switch(allocation->GetType()) + if(allocation != VK_NULL_HANDLE) { - case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: + if(TouchAllocation(allocation)) { - VmaBlockVector* pBlockVector = VMA_NULL; - VmaPool hPool = allocation->GetPool(); - if(hPool != VK_NULL_HANDLE) + if(VMA_DEBUG_INITIALIZE_ALLOCATIONS) { - pBlockVector = &hPool->m_BlockVector; + FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED); } - else + + switch(allocation->GetType()) { - const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex(); - pBlockVector = m_pBlockVectors[memTypeIndex]; + case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: + { + VmaBlockVector* pBlockVector = VMA_NULL; + VmaPool hPool = allocation->GetPool(); + if(hPool != VK_NULL_HANDLE) + { + pBlockVector = &hPool->m_BlockVector; + } + else + { + const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex(); + pBlockVector = m_pBlockVectors[memTypeIndex]; + } + pBlockVector->Free(allocation); + } + break; + case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: + FreeDedicatedMemory(allocation); + break; + default: + 
VMA_ASSERT(0); } - pBlockVector->Free(allocation); } - break; - case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: - FreeDedicatedMemory(allocation); - break; - default: - VMA_ASSERT(0); + + allocation->SetUserData(this, VMA_NULL); + vma_delete(this, allocation); } } - - allocation->SetUserData(this, VMA_NULL); - vma_delete(this, allocation); } void VmaAllocator_T::CalculateStats(VmaStats* pStats) @@ -13707,6 +13905,7 @@ VkResult vmaAllocateMemory( VK_NULL_HANDLE, // dedicatedImage *pCreateInfo, VMA_SUBALLOCATION_TYPE_UNKNOWN, + 1, // allocationCount pAllocation); #if VMA_RECORDING_ENABLED @@ -13728,6 +13927,61 @@ VkResult vmaAllocateMemory( return result; } +VkResult vmaAllocateMemoryPages( + VmaAllocator allocator, + const VkMemoryRequirements* pVkMemoryRequirements, + const VmaAllocationCreateInfo* pCreateInfo, + size_t allocationCount, + VmaAllocation* pAllocations, + VmaAllocationInfo* pAllocationInfo) +{ + if(allocationCount == 0) + { + return VK_SUCCESS; + } + + VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations); + + VMA_DEBUG_LOG("vmaAllocateMemoryPages"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + VkResult result = allocator->AllocateMemory( + *pVkMemoryRequirements, + false, // requiresDedicatedAllocation + false, // prefersDedicatedAllocation + VK_NULL_HANDLE, // dedicatedBuffer + VK_NULL_HANDLE, // dedicatedImage + *pCreateInfo, + VMA_SUBALLOCATION_TYPE_UNKNOWN, + allocationCount, + pAllocations); + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + // TODO: Extend recording format with this function. 
+ /* + allocator->GetRecorder()->RecordAllocateMemoryPages( + allocator->GetCurrentFrameIndex(), + *pVkMemoryRequirements, + *pCreateInfo, + *pAllocation); + */ + } +#endif + + if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS) + { + for(size_t i = 0; i < allocationCount; ++i) + { + allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i); + } + } + + return result; +} + VkResult vmaAllocateMemoryForBuffer( VmaAllocator allocator, VkBuffer buffer, @@ -13756,6 +14010,7 @@ VkResult vmaAllocateMemoryForBuffer( VK_NULL_HANDLE, // dedicatedImage *pCreateInfo, VMA_SUBALLOCATION_TYPE_BUFFER, + 1, // allocationCount pAllocation); #if VMA_RECORDING_ENABLED @@ -13806,6 +14061,7 @@ VkResult vmaAllocateMemoryForImage( image, // dedicatedImage *pCreateInfo, VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN, + 1, // allocationCount pAllocation); #if VMA_RECORDING_ENABLED @@ -13853,7 +14109,40 @@ void vmaFreeMemory( } #endif - allocator->FreeMemory(allocation); + allocator->FreeMemory( + 1, // allocationCount + &allocation); +} + +void vmaFreeMemoryPages( + VmaAllocator allocator, + size_t allocationCount, + VmaAllocation* pAllocations) +{ + if(allocationCount == 0) + { + return; + } + + VMA_ASSERT(allocator); + + VMA_DEBUG_LOG("vmaFreeMemoryPages"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + +#if VMA_RECORDING_ENABLED + // TODO Add this to recording file format. 
+ /* + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordFreeMemoryPages( + allocator->GetCurrentFrameIndex(), + allocation); + } + */ +#endif + + allocator->FreeMemory(allocationCount, pAllocations); } void vmaGetAllocationInfo( @@ -14137,6 +14426,7 @@ VkResult vmaCreateBuffer( VK_NULL_HANDLE, // dedicatedImage *pAllocationCreateInfo, VMA_SUBALLOCATION_TYPE_BUFFER, + 1, // allocationCount pAllocation); #if VMA_RECORDING_ENABLED @@ -14167,7 +14457,9 @@ VkResult vmaCreateBuffer( return VK_SUCCESS; } - allocator->FreeMemory(*pAllocation); + allocator->FreeMemory( + 1, // allocationCount + pAllocation); *pAllocation = VK_NULL_HANDLE; (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks()); *pBuffer = VK_NULL_HANDLE; @@ -14212,7 +14504,9 @@ void vmaDestroyBuffer( if(allocation != VK_NULL_HANDLE) { - allocator->FreeMemory(allocation); + allocator->FreeMemory( + 1, // allocationCount + &allocation); } } @@ -14260,6 +14554,7 @@ VkResult vmaCreateImage( *pImage, // dedicatedImage *pAllocationCreateInfo, suballocType, + 1, // allocationCount pAllocation); #if VMA_RECORDING_ENABLED @@ -14290,7 +14585,9 @@ VkResult vmaCreateImage( return VK_SUCCESS; } - allocator->FreeMemory(*pAllocation); + allocator->FreeMemory( + 1, // allocationCount + pAllocation); *pAllocation = VK_NULL_HANDLE; (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks()); *pImage = VK_NULL_HANDLE; @@ -14334,7 +14631,9 @@ void vmaDestroyImage( } if(allocation != VK_NULL_HANDLE) { - allocator->FreeMemory(allocation); + allocator->FreeMemory( + 1, // allocationCount + &allocation); } } From 2e4d3eff983f71c221ad5302809948d1112b21bc Mon Sep 17 00:00:00 2001 From: Adam Sawicki Date: Wed, 3 Oct 2018 15:48:17 +0200 Subject: [PATCH 03/10] Added BasicTestAllocatePages() - test for vmaAllocateMemoryPages, vmaFreeMemoryPages. 
--- src/Tests.cpp | 98 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 98 insertions(+) diff --git a/src/Tests.cpp b/src/Tests.cpp index a5378a5..e1e3650 100644 --- a/src/Tests.cpp +++ b/src/Tests.cpp @@ -4213,6 +4213,103 @@ static void BasicTestBuddyAllocator() vmaDestroyPool(g_hAllocator, pool); } +static void BasicTestAllocatePages() +{ + wprintf(L"Basic test allocate pages\n"); + + RandomNumberGenerator rand{765461}; + + VkBufferCreateInfo sampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; + sampleBufCreateInfo.size = 1024; // Whatever. + sampleBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT; + + VmaAllocationCreateInfo sampleAllocCreateInfo = {}; + sampleAllocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY; + + VmaPoolCreateInfo poolCreateInfo = {}; + VkResult res = vmaFindMemoryTypeIndexForBufferInfo(g_hAllocator, &sampleBufCreateInfo, &sampleAllocCreateInfo, &poolCreateInfo.memoryTypeIndex); + assert(res == VK_SUCCESS); + + // 1 block of 1 MB. + poolCreateInfo.blockSize = 1024 * 1024; + poolCreateInfo.minBlockCount = poolCreateInfo.maxBlockCount = 1; + + // Create pool. + VmaPool pool = nullptr; + res = vmaCreatePool(g_hAllocator, &poolCreateInfo, &pool); + assert(res == VK_SUCCESS); + + // Make 100 allocations of 4 KB - they should fit into the pool. 
+ VkMemoryRequirements memReq; + memReq.memoryTypeBits = UINT32_MAX; + memReq.alignment = 4 * 1024; + memReq.size = 4 * 1024; + + VmaAllocationCreateInfo allocCreateInfo = {}; + allocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT; + allocCreateInfo.pool = pool; + + constexpr uint32_t allocCount = 100; + + std::vector alloc{allocCount}; + std::vector allocInfo{allocCount}; + res = vmaAllocateMemoryPages(g_hAllocator, &memReq, &allocCreateInfo, allocCount, alloc.data(), allocInfo.data()); + assert(res == VK_SUCCESS); + for(uint32_t i = 0; i < allocCount; ++i) + { + assert(alloc[i] != VK_NULL_HANDLE && + allocInfo[i].pMappedData != nullptr && + allocInfo[i].deviceMemory == allocInfo[0].deviceMemory && + allocInfo[i].memoryType == allocInfo[0].memoryType); + } + + // Free the allocations. + vmaFreeMemoryPages(g_hAllocator, allocCount, alloc.data()); + std::fill(alloc.begin(), alloc.end(), nullptr); + std::fill(allocInfo.begin(), allocInfo.end(), VmaAllocationInfo{}); + + // Try to make 100 allocations of 100 KB. This call should fail due to not enough memory. + // Also test optional allocationInfo = null. + memReq.size = 100 * 1024; + res = vmaAllocateMemoryPages(g_hAllocator, &memReq, &allocCreateInfo, allocCount, alloc.data(), nullptr); + assert(res != VK_SUCCESS); + assert(std::find_if(alloc.begin(), alloc.end(), [](VmaAllocation alloc){ return alloc != VK_NULL_HANDLE; }) == alloc.end()); + + // Make 100 allocations of 4 KB, but with required alignment of 128 KB. This should also fail. + memReq.size = 4 * 1024; + memReq.alignment = 128 * 1024; + res = vmaAllocateMemoryPages(g_hAllocator, &memReq, &allocCreateInfo, allocCount, alloc.data(), allocInfo.data()); + assert(res != VK_SUCCESS); + + // Make 100 dedicated allocations of 4 KB. 
+ memReq.alignment = 4 * 1024; + memReq.size = 4 * 1024; + + VmaAllocationCreateInfo dedicatedAllocCreateInfo = {}; + dedicatedAllocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY; + dedicatedAllocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT | VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT; + res = vmaAllocateMemoryPages(g_hAllocator, &memReq, &dedicatedAllocCreateInfo, allocCount, alloc.data(), allocInfo.data()); + assert(res == VK_SUCCESS); + for(uint32_t i = 0; i < allocCount; ++i) + { + assert(alloc[i] != VK_NULL_HANDLE && + allocInfo[i].pMappedData != nullptr && + allocInfo[i].memoryType == allocInfo[0].memoryType && + allocInfo[i].offset == 0); + if(i > 0) + { + assert(allocInfo[i].deviceMemory != allocInfo[0].deviceMemory); + } + } + + // Free the allocations. + vmaFreeMemoryPages(g_hAllocator, allocCount, alloc.data()); + std::fill(alloc.begin(), alloc.end(), nullptr); + std::fill(allocInfo.begin(), allocInfo.end(), VmaAllocationInfo{}); + + vmaDestroyPool(g_hAllocator, pool); +} + void Test() { wprintf(L"TESTING:\n"); @@ -4246,6 +4343,7 @@ void Test() TestLinearAllocatorMultiBlock(); BasicTestBuddyAllocator(); + BasicTestAllocatePages(); { FILE* file; From 4868c1f52357a622b106911be819b532a6f3a73a Mon Sep 17 00:00:00 2001 From: Adam Sawicki Date: Wed, 3 Oct 2018 15:57:11 +0200 Subject: [PATCH 04/10] Fixed tests for NVIDIA, where it asserted with OUT_OF_MEMORY, probably due to higher alignment requirements. --- src/Tests.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/Tests.cpp b/src/Tests.cpp index e1e3650..c194c25 100644 --- a/src/Tests.cpp +++ b/src/Tests.cpp @@ -2199,9 +2199,9 @@ static void BenchmarkAlgorithmsCase(FILE* file, if(!empty) { - // Make allocations up to half of pool size. + // Make allocations up to 1/3 of pool size. 
VkDeviceSize totalSize = 0; - while(totalSize < poolCreateInfo.blockSize / 2) + while(totalSize < poolCreateInfo.blockSize / 3) { memReq.size = bufSizeMin + rand.Generate() % (bufSizeMax - bufSizeMin); res = vmaAllocateMemory(g_hAllocator, &memReq, &allocCreateInfo, &alloc, nullptr); @@ -2221,7 +2221,7 @@ static void BenchmarkAlgorithmsCase(FILE* file, } // BENCHMARK - const size_t allocCount = maxBufCapacity / 2; + const size_t allocCount = maxBufCapacity / 3; std::vector testAllocations; testAllocations.reserve(allocCount); duration allocTotalDuration = duration::zero(); @@ -2367,7 +2367,7 @@ static void BenchmarkAlgorithms(FILE* file) BenchmarkAlgorithmsCase( file, algorithm, - emptyIndex ? 0 : 1, // empty + (emptyIndex == 0), // empty strategy, freeOrder); // freeOrder } From a7d7769959208d44f5c9f9bfe8584b854ad61012 Mon Sep 17 00:00:00 2001 From: Adam Sawicki Date: Wed, 3 Oct 2018 16:15:27 +0200 Subject: [PATCH 05/10] Replaced assert() with new macro TEST() in all tests, to check conditions also in Release configuration. 
--- src/Common.h | 17 +- src/SparseBindingTest.cpp | 2 +- src/Tests.cpp | 388 +++++++++++++++++++------------------- src/VulkanSample.cpp | 18 +- 4 files changed, 227 insertions(+), 198 deletions(-) diff --git a/src/Common.h b/src/Common.h index 111ccde..0e32c78 100644 --- a/src/Common.h +++ b/src/Common.h @@ -16,6 +16,7 @@ #include #include #include +#include #include #include @@ -25,7 +26,21 @@ typedef std::chrono::high_resolution_clock::time_point time_point; typedef std::chrono::high_resolution_clock::duration duration; -#define ERR_GUARD_VULKAN(Expr) do { VkResult res__ = (Expr); if (res__ < 0) assert(0); } while(0) +#ifdef _DEBUG + #define TEST(expr) do { \ + if(!(expr)) { \ + assert(0 && #expr); \ + } \ + } while(0) +#else + #define TEST(expr) do { \ + if(!(expr)) { \ + throw std::runtime_error("TEST FAILED: " #expr); \ + } \ + } while(0) +#endif + +#define ERR_GUARD_VULKAN(expr) TEST((expr) >= 0) extern VkPhysicalDevice g_hPhysicalDevice; extern VkDevice g_hDevice; diff --git a/src/SparseBindingTest.cpp b/src/SparseBindingTest.cpp index 02be000..661399e 100644 --- a/src/SparseBindingTest.cpp +++ b/src/SparseBindingTest.cpp @@ -128,7 +128,7 @@ void SparseBindingImage::Init(RandomNumberGenerator& rand) // But it doesn't help. Looks like a bug in Vulkan validation layers. 
uint32_t sparseMemReqCount = 0; vkGetImageSparseMemoryRequirements(g_hDevice, m_Image, &sparseMemReqCount, nullptr); - assert(sparseMemReqCount <= 8); + TEST(sparseMemReqCount <= 8); VkSparseImageMemoryRequirements sparseMemReq[8]; vkGetImageSparseMemoryRequirements(g_hDevice, m_Image, &sparseMemReqCount, sparseMemReq); diff --git a/src/Tests.cpp b/src/Tests.cpp index c194c25..c8445f9 100644 --- a/src/Tests.cpp +++ b/src/Tests.cpp @@ -401,7 +401,7 @@ VkResult MainTest(Result& outResult, const Config& config) } else { - assert(0); + TEST(0); } return res; }; @@ -684,14 +684,14 @@ static void CreateBuffer( { outAllocInfo.m_StartValue = (uint32_t)rand(); uint32_t* data = (uint32_t*)vmaAllocInfo.pMappedData; - assert((data != nullptr) == persistentlyMapped); + TEST((data != nullptr) == persistentlyMapped); if(!persistentlyMapped) { ERR_GUARD_VULKAN( vmaMapMemory(g_hAllocator, outAllocInfo.m_Allocation, (void**)&data) ); } uint32_t value = outAllocInfo.m_StartValue; - assert(bufCreateInfo.size % 4 == 0); + TEST(bufCreateInfo.size % 4 == 0); for(size_t i = 0; i < bufCreateInfo.size / sizeof(uint32_t); ++i) data[i] = value++; @@ -726,7 +726,7 @@ static void CreateAllocation(AllocInfo& outAllocation, VmaAllocator allocator) VkResult res = vmaCreateBuffer(allocator, &bufferInfo, &vmaMemReq, &outAllocation.m_Buffer, &outAllocation.m_Allocation, &allocInfo); outAllocation.m_BufferInfo = bufferInfo; - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); } else { @@ -752,18 +752,18 @@ static void CreateAllocation(AllocInfo& outAllocation, VmaAllocator allocator) VkResult res = vmaCreateImage(allocator, &imageInfo, &vmaMemReq, &outAllocation.m_Image, &outAllocation.m_Allocation, &allocInfo); outAllocation.m_ImageInfo = imageInfo; - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); } uint32_t* data = (uint32_t*)allocInfo.pMappedData; if(allocInfo.pMappedData == nullptr) { VkResult res = vmaMapMemory(allocator, outAllocation.m_Allocation, (void**)&data); - assert(res == 
VK_SUCCESS); + TEST(res == VK_SUCCESS); } uint32_t value = outAllocation.m_StartValue; - assert(allocInfo.size % 4 == 0); + TEST(allocInfo.size % 4 == 0); for(size_t i = 0; i < allocInfo.size / sizeof(uint32_t); ++i) data[i] = value++; @@ -795,13 +795,13 @@ static void ValidateAllocationData(const AllocInfo& allocation) if(allocInfo.pMappedData == nullptr) { VkResult res = vmaMapMemory(g_hAllocator, allocation.m_Allocation, (void**)&data); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); } uint32_t value = allocation.m_StartValue; bool ok = true; size_t i; - assert(allocInfo.size % 4 == 0); + TEST(allocInfo.size % 4 == 0); for(i = 0; i < allocInfo.size / sizeof(uint32_t); ++i) { if(data[i] != value++) @@ -810,7 +810,7 @@ static void ValidateAllocationData(const AllocInfo& allocation) break; } } - assert(ok); + TEST(ok); if(allocInfo.pMappedData == nullptr) vmaUnmapMemory(g_hAllocator, allocation.m_Allocation); @@ -826,29 +826,29 @@ static void RecreateAllocationResource(AllocInfo& allocation) vkDestroyBuffer(g_hDevice, allocation.m_Buffer, nullptr); VkResult res = vkCreateBuffer(g_hDevice, &allocation.m_BufferInfo, nullptr, &allocation.m_Buffer); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); // Just to silence validation layer warnings. VkMemoryRequirements vkMemReq; vkGetBufferMemoryRequirements(g_hDevice, allocation.m_Buffer, &vkMemReq); - assert(vkMemReq.size == allocation.m_BufferInfo.size); + TEST(vkMemReq.size == allocation.m_BufferInfo.size); res = vkBindBufferMemory(g_hDevice, allocation.m_Buffer, allocInfo.deviceMemory, allocInfo.offset); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); } else { vkDestroyImage(g_hDevice, allocation.m_Image, nullptr); VkResult res = vkCreateImage(g_hDevice, &allocation.m_ImageInfo, nullptr, &allocation.m_Image); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); // Just to silence validation layer warnings. 
VkMemoryRequirements vkMemReq; vkGetImageMemoryRequirements(g_hDevice, allocation.m_Image, &vkMemReq); res = vkBindImageMemory(g_hDevice, allocation.m_Image, allocInfo.deviceMemory, allocInfo.offset); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); } } @@ -942,8 +942,8 @@ void TestDefragmentationSimple() VmaDefragmentationStats defragStats; Defragment(allocations.data(), allocations.size(), nullptr, &defragStats); - assert(defragStats.allocationsMoved > 0 && defragStats.bytesMoved > 0); - assert(defragStats.deviceMemoryBlocksFreed >= 1); + TEST(defragStats.allocationsMoved > 0 && defragStats.bytesMoved > 0); + TEST(defragStats.deviceMemoryBlocksFreed >= 1); ValidateAllocationsData(allocations.data(), allocations.size()); @@ -976,7 +976,7 @@ void TestDefragmentationSimple() { VmaDefragmentationStats defragStats; Defragment(allocations.data(), allocations.size(), &defragInfo, &defragStats); - assert(defragStats.allocationsMoved > 0 && defragStats.bytesMoved > 0); + TEST(defragStats.allocationsMoved > 0 && defragStats.bytesMoved > 0); } ValidateAllocationsData(allocations.data(), allocations.size()); @@ -1089,7 +1089,7 @@ void TestDefragmentationFull() VmaDefragmentationStats stats; VkResult res = vmaDefragment(g_hAllocator, vmaAllocations.data(), vmaAllocations.size(), allocationsChanged.data(), &defragmentationInfo, &stats); - assert(res >= 0); + TEST(res >= 0); float defragmentDuration = ToFloatSeconds(std::chrono::high_resolution_clock::now() - begTime); @@ -1142,15 +1142,15 @@ static void TestUserData() VkBuffer buf; VmaAllocation alloc; VmaAllocationInfo allocInfo; res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo); - assert(res == VK_SUCCESS); - assert(allocInfo.pUserData = numberAsPointer); + TEST(res == VK_SUCCESS); + TEST(allocInfo.pUserData = numberAsPointer); vmaGetAllocationInfo(g_hAllocator, alloc, &allocInfo); - assert(allocInfo.pUserData == numberAsPointer); + TEST(allocInfo.pUserData == 
numberAsPointer); vmaSetAllocationUserData(g_hAllocator, alloc, pointerToSomething); vmaGetAllocationInfo(g_hAllocator, alloc, &allocInfo); - assert(allocInfo.pUserData == pointerToSomething); + TEST(allocInfo.pUserData == pointerToSomething); vmaDestroyBuffer(g_hAllocator, buf, alloc); } @@ -1173,22 +1173,22 @@ static void TestUserData() VkBuffer buf; VmaAllocation alloc; VmaAllocationInfo allocInfo; res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo); - assert(res == VK_SUCCESS); - assert(allocInfo.pUserData != nullptr && allocInfo.pUserData != name1Buf); - assert(strcmp(name1, (const char*)allocInfo.pUserData) == 0); + TEST(res == VK_SUCCESS); + TEST(allocInfo.pUserData != nullptr && allocInfo.pUserData != name1Buf); + TEST(strcmp(name1, (const char*)allocInfo.pUserData) == 0); delete[] name1Buf; vmaGetAllocationInfo(g_hAllocator, alloc, &allocInfo); - assert(strcmp(name1, (const char*)allocInfo.pUserData) == 0); + TEST(strcmp(name1, (const char*)allocInfo.pUserData) == 0); vmaSetAllocationUserData(g_hAllocator, alloc, (void*)name2); vmaGetAllocationInfo(g_hAllocator, alloc, &allocInfo); - assert(strcmp(name2, (const char*)allocInfo.pUserData) == 0); + TEST(strcmp(name2, (const char*)allocInfo.pUserData) == 0); vmaSetAllocationUserData(g_hAllocator, alloc, nullptr); vmaGetAllocationInfo(g_hAllocator, alloc, &allocInfo); - assert(allocInfo.pUserData == nullptr); + TEST(allocInfo.pUserData == nullptr); vmaDestroyBuffer(g_hAllocator, buf, alloc); } @@ -1213,7 +1213,7 @@ static void TestMemoryRequirements() // No requirements. res = vmaCreateBuffer(g_hAllocator, &bufInfo, &allocCreateInfo, &buf, &alloc, &allocInfo); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); vmaDestroyBuffer(g_hAllocator, buf, alloc); // Usage. 
@@ -1223,8 +1223,8 @@ static void TestMemoryRequirements() allocCreateInfo.memoryTypeBits = UINT32_MAX; res = vmaCreateBuffer(g_hAllocator, &bufInfo, &allocCreateInfo, &buf, &alloc, &allocInfo); - assert(res == VK_SUCCESS); - assert(memProps->memoryTypes[allocInfo.memoryType].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT); + TEST(res == VK_SUCCESS); + TEST(memProps->memoryTypes[allocInfo.memoryType].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT); vmaDestroyBuffer(g_hAllocator, buf, alloc); // Required flags, preferred flags. @@ -1234,9 +1234,9 @@ static void TestMemoryRequirements() allocCreateInfo.memoryTypeBits = 0; res = vmaCreateBuffer(g_hAllocator, &bufInfo, &allocCreateInfo, &buf, &alloc, &allocInfo); - assert(res == VK_SUCCESS); - assert(memProps->memoryTypes[allocInfo.memoryType].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT); - assert(memProps->memoryTypes[allocInfo.memoryType].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT); + TEST(res == VK_SUCCESS); + TEST(memProps->memoryTypes[allocInfo.memoryType].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT); + TEST(memProps->memoryTypes[allocInfo.memoryType].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT); vmaDestroyBuffer(g_hAllocator, buf, alloc); // memoryTypeBits. 
@@ -1247,8 +1247,8 @@ static void TestMemoryRequirements() allocCreateInfo.memoryTypeBits = 1u << memType; res = vmaCreateBuffer(g_hAllocator, &bufInfo, &allocCreateInfo, &buf, &alloc, &allocInfo); - assert(res == VK_SUCCESS); - assert(allocInfo.memoryType == memType); + TEST(res == VK_SUCCESS); + TEST(allocInfo.memoryType == memType); vmaDestroyBuffer(g_hAllocator, buf, alloc); } @@ -1263,12 +1263,12 @@ static void TestBasics() { VmaAllocation alloc = VK_NULL_HANDLE; vmaCreateLostAllocation(g_hAllocator, &alloc); - assert(alloc != VK_NULL_HANDLE); + TEST(alloc != VK_NULL_HANDLE); VmaAllocationInfo allocInfo; vmaGetAllocationInfo(g_hAllocator, alloc, &allocInfo); - assert(allocInfo.deviceMemory == VK_NULL_HANDLE); - assert(allocInfo.size == 0); + TEST(allocInfo.deviceMemory == VK_NULL_HANDLE); + TEST(allocInfo.size == 0); vmaFreeMemory(g_hAllocator, alloc); } @@ -1285,7 +1285,7 @@ static void TestBasics() VkBuffer buf; VmaAllocation alloc; VmaAllocationInfo allocInfo; res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); vmaDestroyBuffer(g_hAllocator, buf, alloc); @@ -1293,7 +1293,7 @@ static void TestBasics() allocCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT; res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); vmaDestroyBuffer(g_hAllocator, buf, alloc); } @@ -1319,7 +1319,7 @@ void TestHeapSizeLimit() VmaAllocator hAllocator; VkResult res = vmaCreateAllocator(&allocatorCreateInfo, &hAllocator); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); struct Item { @@ -1344,7 +1344,7 @@ void TestHeapSizeLimit() { Item item; res = vmaCreateBuffer(hAllocator, &bufCreateInfo, &allocCreateInfo, &item.hBuf, &item.hAlloc, &ownAllocInfo); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); items.push_back(item); } } @@ -1356,7 +1356,7 @@ void 
TestHeapSizeLimit() VmaPool hPool; res = vmaCreatePool(hAllocator, &poolCreateInfo, &hPool); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); // 2. Allocate normal buffers from all the remaining memory. { @@ -1370,7 +1370,7 @@ void TestHeapSizeLimit() { Item item; res = vmaCreateBuffer(hAllocator, &bufCreateInfo, &allocCreateInfo, &item.hBuf, &item.hAlloc, nullptr); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); items.push_back(item); } } @@ -1385,7 +1385,7 @@ void TestHeapSizeLimit() VkBuffer hBuf; VmaAllocation hAlloc; res = vmaCreateBuffer(hAllocator, &bufCreateInfo, &allocCreateInfo, &hBuf, &hAlloc, nullptr); - assert(res == VK_ERROR_OUT_OF_DEVICE_MEMORY); + TEST(res == VK_ERROR_OUT_OF_DEVICE_MEMORY); } // Destroy everything. @@ -1424,14 +1424,14 @@ static void TestDebugMargin() allocCreateInfo.flags = (i == BUF_COUNT - 1) ? VMA_ALLOCATION_CREATE_MAPPED_BIT : 0; VkResult res = vmaCreateBuffer(g_hAllocator, &bufInfo, &allocCreateInfo, &buffers[i].Buffer, &buffers[i].Allocation, &allocInfo[i]); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); // Margin is preserved also at the beginning of a block. - assert(allocInfo[i].offset >= VMA_DEBUG_MARGIN); + TEST(allocInfo[i].offset >= VMA_DEBUG_MARGIN); if(i == BUF_COUNT - 1) { // Fill with data. - assert(allocInfo[i].pMappedData != nullptr); + TEST(allocInfo[i].pMappedData != nullptr); // Uncomment this "+ 1" to overwrite past end of allocation and check corruption detection. memset(allocInfo[i].pMappedData, 0xFF, bufInfo.size /* + 1 */); } @@ -1450,12 +1450,12 @@ static void TestDebugMargin() { if(allocInfo[i].deviceMemory == allocInfo[i - 1].deviceMemory) { - assert(allocInfo[i].offset >= allocInfo[i - 1].offset + VMA_DEBUG_MARGIN); + TEST(allocInfo[i].offset >= allocInfo[i - 1].offset + VMA_DEBUG_MARGIN); } } VkResult res = vmaCheckCorruption(g_hAllocator, UINT32_MAX); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); // Destroy all buffers. 
for(size_t i = BUF_COUNT; i--; ) @@ -1480,7 +1480,7 @@ static void TestLinearAllocator() VmaPoolCreateInfo poolCreateInfo = {}; VkResult res = vmaFindMemoryTypeIndexForBufferInfo(g_hAllocator, &sampleBufCreateInfo, &sampleAllocCreateInfo, &poolCreateInfo.memoryTypeIndex); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); poolCreateInfo.blockSize = 1024 * 300; poolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT; @@ -1488,7 +1488,7 @@ static void TestLinearAllocator() VmaPool pool = nullptr; res = vmaCreatePool(g_hAllocator, &poolCreateInfo, &pool); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); VkBufferCreateInfo bufCreateInfo = sampleBufCreateInfo; @@ -1514,8 +1514,8 @@ static void TestLinearAllocator() BufferInfo newBufInfo; res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo); - assert(res == VK_SUCCESS); - assert(i == 0 || allocInfo.offset > prevOffset); + TEST(res == VK_SUCCESS); + TEST(i == 0 || allocInfo.offset > prevOffset); bufInfo.push_back(newBufInfo); prevOffset = allocInfo.offset; bufSumSize += bufCreateInfo.size; @@ -1524,9 +1524,9 @@ static void TestLinearAllocator() // Validate pool stats. VmaPoolStats stats; vmaGetPoolStats(g_hAllocator, pool, &stats); - assert(stats.size == poolCreateInfo.blockSize); - assert(stats.unusedSize = poolCreateInfo.blockSize - bufSumSize); - assert(stats.allocationCount == bufInfo.size()); + TEST(stats.size == poolCreateInfo.blockSize); + TEST(stats.unusedSize == poolCreateInfo.blockSize - bufSumSize); + TEST(stats.allocationCount == bufInfo.size()); // Destroy the buffers in random order. 
while(!bufInfo.empty()) @@ -1547,8 +1547,8 @@ static void TestLinearAllocator() BufferInfo newBufInfo; res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo); - assert(res == VK_SUCCESS); - assert(i == 0 || allocInfo.offset > prevOffset); + TEST(res == VK_SUCCESS); + TEST(i == 0 || allocInfo.offset > prevOffset); bufInfo.push_back(newBufInfo); prevOffset = allocInfo.offset; } @@ -1568,8 +1568,8 @@ static void TestLinearAllocator() BufferInfo newBufInfo; res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo); - assert(res == VK_SUCCESS); - assert(i == 0 || allocInfo.offset > prevOffset); + TEST(res == VK_SUCCESS); + TEST(i == 0 || allocInfo.offset > prevOffset); bufInfo.push_back(newBufInfo); prevOffset = allocInfo.offset; } @@ -1592,8 +1592,8 @@ static void TestLinearAllocator() BufferInfo newBufInfo; res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo); - assert(res == VK_SUCCESS); - assert(i == 0 || allocInfo.offset > prevOffset); + TEST(res == VK_SUCCESS); + TEST(i == 0 || allocInfo.offset > prevOffset); bufInfo.push_back(newBufInfo); prevOffset = allocInfo.offset; } @@ -1614,7 +1614,7 @@ static void TestLinearAllocator() BufferInfo newBufInfo; res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); bufInfo.push_back(newBufInfo); } } @@ -1632,7 +1632,7 @@ static void TestLinearAllocator() } else { - assert(res == VK_ERROR_OUT_OF_DEVICE_MEMORY); + TEST(res == VK_ERROR_OUT_OF_DEVICE_MEMORY); } ++debugIndex; } @@ -1663,18 +1663,18 @@ static void TestLinearAllocator() BufferInfo newBufInfo; res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo); - assert(res == 
VK_SUCCESS); + TEST(res == VK_SUCCESS); if(upperAddress) { - assert(allocInfo.offset < prevOffsetUpper); + TEST(allocInfo.offset < prevOffsetUpper); prevOffsetUpper = allocInfo.offset; } else { - assert(allocInfo.offset >= prevOffsetLower); + TEST(allocInfo.offset >= prevOffsetLower); prevOffsetLower = allocInfo.offset; } - assert(prevOffsetLower < prevOffsetUpper); + TEST(prevOffsetLower < prevOffsetUpper); bufInfo.push_back(newBufInfo); } @@ -1698,7 +1698,7 @@ static void TestLinearAllocator() BufferInfo newBufInfo; res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); bufInfo.push_back(newBufInfo); } @@ -1729,15 +1729,15 @@ static void TestLinearAllocator() { if(upperAddress) { - assert(allocInfo.offset < prevOffsetUpper); + TEST(allocInfo.offset < prevOffsetUpper); prevOffsetUpper = allocInfo.offset; } else { - assert(allocInfo.offset >= prevOffsetLower); + TEST(allocInfo.offset >= prevOffsetLower); prevOffsetLower = allocInfo.offset; } - assert(prevOffsetLower < prevOffsetUpper); + TEST(prevOffsetLower < prevOffsetUpper); bufInfo.push_back(newBufInfo); } } @@ -1763,7 +1763,7 @@ static void TestLinearAllocator() &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo); if(res == VK_SUCCESS) { - assert(allocInfo.offset < prevOffsetUpper); + TEST(allocInfo.offset < prevOffsetUpper); prevOffsetUpper = allocInfo.offset; bufInfo.push_back(newBufInfo); } @@ -1834,13 +1834,13 @@ static void TestLinearAllocator() BufferInfo newBufInfo; res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); bufInfo.push_back(newBufInfo); firstNewOffset = allocInfo.offset; // Make sure at least one buffer from the beginning became lost. 
vmaGetAllocationInfo(g_hAllocator, bufInfo[0].Allocation, &allocInfo); - assert(allocInfo.deviceMemory == VK_NULL_HANDLE); + TEST(allocInfo.deviceMemory == VK_NULL_HANDLE); } // Allocate more buffers that CAN_MAKE_OTHER_LOST until we wrap-around with this. @@ -1854,7 +1854,7 @@ static void TestLinearAllocator() BufferInfo newBufInfo; res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); bufInfo.push_back(newBufInfo); ++newCount; if(allocInfo.offset < firstNewOffset) @@ -1878,7 +1878,7 @@ static void TestLinearAllocator() size_t lostAllocCount = SIZE_MAX; vmaMakePoolAllocationsLost(g_hAllocator, pool, &lostAllocCount); - assert(lostAllocCount > 0); + TEST(lostAllocCount > 0); size_t realLostAllocCount = 0; for(size_t i = 0; i < bufInfo.size(); ++i) @@ -1887,7 +1887,7 @@ static void TestLinearAllocator() if(allocInfo.deviceMemory == VK_NULL_HANDLE) ++realLostAllocCount; } - assert(realLostAllocCount == lostAllocCount); + TEST(realLostAllocCount == lostAllocCount); } // Destroy all the buffers in forward order. 
@@ -1915,11 +1915,11 @@ static void TestLinearAllocatorMultiBlock() VmaPoolCreateInfo poolCreateInfo = {}; poolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT; VkResult res = vmaFindMemoryTypeIndexForBufferInfo(g_hAllocator, &sampleBufCreateInfo, &sampleAllocCreateInfo, &poolCreateInfo.memoryTypeIndex); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); VmaPool pool = nullptr; res = vmaCreatePool(g_hAllocator, &poolCreateInfo, &pool); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); VkBufferCreateInfo bufCreateInfo = sampleBufCreateInfo; @@ -1938,7 +1938,7 @@ static void TestLinearAllocatorMultiBlock() BufferInfo newBufInfo; res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); bufInfo.push_back(newBufInfo); if(lastMem && allocInfo.deviceMemory != lastMem) { @@ -1947,12 +1947,12 @@ static void TestLinearAllocatorMultiBlock() lastMem = allocInfo.deviceMemory; } - assert(bufInfo.size() > 2); + TEST(bufInfo.size() > 2); // Make sure that pool has now two blocks. VmaPoolStats poolStats = {}; vmaGetPoolStats(g_hAllocator, pool, &poolStats); - assert(poolStats.blockCount == 2); + TEST(poolStats.blockCount == 2); // Destroy all the buffers in random order. while(!bufInfo.empty()) @@ -1965,7 +1965,7 @@ static void TestLinearAllocatorMultiBlock() // Make sure that pool has now at most one block. vmaGetPoolStats(g_hAllocator, pool, &poolStats); - assert(poolStats.blockCount <= 1); + TEST(poolStats.blockCount <= 1); } // Test stack. 
@@ -1977,7 +1977,7 @@ static void TestLinearAllocatorMultiBlock() BufferInfo newBufInfo; res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); bufInfo.push_back(newBufInfo); if(lastMem && allocInfo.deviceMemory != lastMem) { @@ -1986,7 +1986,7 @@ static void TestLinearAllocatorMultiBlock() lastMem = allocInfo.deviceMemory; } - assert(bufInfo.size() > 2); + TEST(bufInfo.size() > 2); // Add few more buffers. for(uint32_t i = 0; i < 5; ++i) @@ -1994,14 +1994,14 @@ static void TestLinearAllocatorMultiBlock() BufferInfo newBufInfo; res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); bufInfo.push_back(newBufInfo); } // Make sure that pool has now two blocks. VmaPoolStats poolStats = {}; vmaGetPoolStats(g_hAllocator, pool, &poolStats); - assert(poolStats.blockCount == 2); + TEST(poolStats.blockCount == 2); // Delete half of buffers, LIFO. for(size_t i = 0, countToDelete = bufInfo.size() / 2; i < countToDelete; ++i) @@ -2015,12 +2015,12 @@ static void TestLinearAllocatorMultiBlock() BufferInfo newBufInfo; res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); bufInfo.push_back(newBufInfo); // Make sure that pool has now one block. vmaGetPoolStats(g_hAllocator, pool, &poolStats); - assert(poolStats.blockCount == 1); + TEST(poolStats.blockCount == 1); // Delete all the remaining buffers, LIFO. 
while(!bufInfo.empty()) @@ -2052,7 +2052,7 @@ static void ManuallyTestLinearAllocator() VmaPoolCreateInfo poolCreateInfo = {}; VkResult res = vmaFindMemoryTypeIndexForBufferInfo(g_hAllocator, &sampleBufCreateInfo, &sampleAllocCreateInfo, &poolCreateInfo.memoryTypeIndex); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); poolCreateInfo.blockSize = 10 * 1024; poolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT; @@ -2060,7 +2060,7 @@ static void ManuallyTestLinearAllocator() VmaPool pool = nullptr; res = vmaCreatePool(g_hAllocator, &poolCreateInfo, &pool); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); VkBufferCreateInfo bufCreateInfo = sampleBufCreateInfo; @@ -2087,19 +2087,19 @@ static void ManuallyTestLinearAllocator() bufCreateInfo.size = 32; res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); bufInfo.push_back(newBufInfo); bufCreateInfo.size = 1024; res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); bufInfo.push_back(newBufInfo); bufCreateInfo.size = 32; res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); bufInfo.push_back(newBufInfo); allocCreateInfo.flags |= VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT; @@ -2107,19 +2107,19 @@ static void ManuallyTestLinearAllocator() bufCreateInfo.size = 128; res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); bufInfo.push_back(newBufInfo); bufCreateInfo.size = 1024; res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo); - 
assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); bufInfo.push_back(newBufInfo); bufCreateInfo.size = 16; res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); bufInfo.push_back(newBufInfo); VmaStats currStats; @@ -2170,7 +2170,7 @@ static void BenchmarkAlgorithmsCase(FILE* file, VmaPoolCreateInfo poolCreateInfo = {}; VkResult res = vmaFindMemoryTypeIndexForBufferInfo(g_hAllocator, &sampleBufCreateInfo, &sampleAllocCreateInfo, &poolCreateInfo.memoryTypeIndex); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); poolCreateInfo.blockSize = bufSizeMax * maxBufCapacity; poolCreateInfo.flags |= algorithm; @@ -2178,12 +2178,12 @@ static void BenchmarkAlgorithmsCase(FILE* file, VmaPool pool = nullptr; res = vmaCreatePool(g_hAllocator, &poolCreateInfo, &pool); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); // Buffer created just to get memory requirements. Never bound to any memory. 
VkBuffer dummyBuffer = VK_NULL_HANDLE; res = vkCreateBuffer(g_hDevice, &sampleBufCreateInfo, nullptr, &dummyBuffer); - assert(res == VK_SUCCESS && dummyBuffer); + TEST(res == VK_SUCCESS && dummyBuffer); VkMemoryRequirements memReq = {}; vkGetBufferMemoryRequirements(g_hDevice, dummyBuffer, &memReq); @@ -2205,7 +2205,7 @@ static void BenchmarkAlgorithmsCase(FILE* file, { memReq.size = bufSizeMin + rand.Generate() % (bufSizeMax - bufSizeMin); res = vmaAllocateMemory(g_hAllocator, &memReq, &allocCreateInfo, &alloc, nullptr); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); baseAllocations.push_back(alloc); totalSize += memReq.size; } @@ -2234,7 +2234,7 @@ static void BenchmarkAlgorithmsCase(FILE* file, { memReq.size = bufSizeMin + rand.Generate() % (bufSizeMax - bufSizeMin); res = vmaAllocateMemory(g_hAllocator, &memReq, &allocCreateInfo, &alloc, nullptr); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); testAllocations.push_back(alloc); } allocTotalDuration += std::chrono::high_resolution_clock::now() - allocTimeBeg; @@ -2392,7 +2392,7 @@ static void TestPool_SameSize() { VkBuffer dummyBuffer; res = vkCreateBuffer(g_hDevice, &bufferInfo, nullptr, &dummyBuffer); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); VkMemoryRequirements memReq; vkGetBufferMemoryRequirements(g_hDevice, dummyBuffer, &memReq); @@ -2419,7 +2419,7 @@ static void TestPool_SameSize() VmaPool pool; res = vmaCreatePool(g_hAllocator, &poolCreateInfo, &pool); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); vmaSetCurrentFrameIndex(g_hAllocator, 1); @@ -2440,7 +2440,7 @@ static void TestPool_SameSize() { BufItem item; res = vmaCreateBuffer(g_hAllocator, &bufferInfo, &allocInfo, &item.Buf, &item.Alloc, nullptr); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); items.push_back(item); } @@ -2448,7 +2448,7 @@ static void TestPool_SameSize() { BufItem item; res = vmaCreateBuffer(g_hAllocator, &bufferInfo, &allocInfo, &item.Buf, &item.Alloc, nullptr); - assert(res == 
VK_ERROR_OUT_OF_DEVICE_MEMORY); + TEST(res == VK_ERROR_OUT_OF_DEVICE_MEMORY); } // Validate that no buffer is lost. Also check that they are not mapped. @@ -2456,8 +2456,8 @@ static void TestPool_SameSize() { VmaAllocationInfo allocInfo; vmaGetAllocationInfo(g_hAllocator, items[i].Alloc, &allocInfo); - assert(allocInfo.deviceMemory != VK_NULL_HANDLE); - assert(allocInfo.pMappedData == nullptr); + TEST(allocInfo.deviceMemory != VK_NULL_HANDLE); + TEST(allocInfo.pMappedData == nullptr); } // Free some percent of random items. @@ -2484,7 +2484,7 @@ static void TestPool_SameSize() { BufItem item; res = vmaCreateBuffer(g_hAllocator, &bufferInfo, &allocInfo, &item.Buf, &item.Alloc, nullptr); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); items.push_back(item); } } @@ -2505,7 +2505,7 @@ static void TestPool_SameSize() { BufItem item; res = vmaCreateBuffer(g_hAllocator, &bufferInfo, &allocInfo, &item.Buf, &item.Alloc, nullptr); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); items.push_back(item); } @@ -2514,7 +2514,7 @@ static void TestPool_SameSize() { VmaAllocationInfo allocInfo; vmaGetAllocationInfo(g_hAllocator, items[i].Alloc, &allocInfo); - assert(allocInfo.deviceMemory != VK_NULL_HANDLE); + TEST(allocInfo.deviceMemory != VK_NULL_HANDLE); } // Next frame. 
@@ -2525,7 +2525,7 @@ static void TestPool_SameSize() { BufItem item; res = vmaCreateBuffer(g_hAllocator, &bufferInfo, &allocInfo, &item.Buf, &item.Alloc, nullptr); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); items.push_back(item); } @@ -2534,7 +2534,7 @@ static void TestPool_SameSize() { VmaAllocationInfo allocInfo; vmaGetAllocationInfo(g_hAllocator, items[i].Alloc, &allocInfo); - assert(allocInfo.deviceMemory == VK_NULL_HANDLE); + TEST(allocInfo.deviceMemory == VK_NULL_HANDLE); vmaDestroyBuffer(g_hAllocator, items[i].Buf, items[i].Alloc); } items.erase(items.begin(), items.begin() + BUF_COUNT); @@ -2544,7 +2544,7 @@ static void TestPool_SameSize() { VmaAllocationInfo allocInfo; vmaGetAllocationInfo(g_hAllocator, items[i].Alloc, &allocInfo); - assert(allocInfo.deviceMemory != VK_NULL_HANDLE); + TEST(allocInfo.deviceMemory != VK_NULL_HANDLE); } // Free one item. @@ -2555,11 +2555,11 @@ static void TestPool_SameSize() { VmaPoolStats poolStats = {}; vmaGetPoolStats(g_hAllocator, pool, &poolStats); - assert(poolStats.allocationCount == items.size()); - assert(poolStats.size = BUF_COUNT * BUF_SIZE); - assert(poolStats.unusedRangeCount == 1); - assert(poolStats.unusedRangeSizeMax == BUF_SIZE); - assert(poolStats.unusedSize == BUF_SIZE); + TEST(poolStats.allocationCount == items.size()); + TEST(poolStats.size == BUF_COUNT * BUF_SIZE); + TEST(poolStats.unusedRangeCount == 1); + TEST(poolStats.unusedRangeSizeMax == BUF_SIZE); + TEST(poolStats.unusedSize == BUF_SIZE); } // Free all remaining items. 
@@ -2572,7 +2572,7 @@ static void TestPool_SameSize() { BufItem item; res = vmaCreateBuffer(g_hAllocator, &bufferInfo, &allocInfo, &item.Buf, &item.Alloc, nullptr); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); items.push_back(item); } @@ -2591,8 +2591,8 @@ static void TestPool_SameSize() VmaDefragmentationStats defragmentationStats; res = vmaDefragment(g_hAllocator, allocationsToDefragment.data(), items.size(), nullptr, nullptr, &defragmentationStats); - assert(res == VK_SUCCESS); - assert(defragmentationStats.deviceMemoryBlocksFreed == 2); + TEST(res == VK_SUCCESS); + TEST(defragmentationStats.deviceMemoryBlocksFreed == 2); } // Free all remaining items. @@ -2609,7 +2609,7 @@ static void TestPool_SameSize() { BufItem item; res = vmaCreateBuffer(g_hAllocator, &bufferInfo, &allocInfo, &item.Buf, &item.Alloc, nullptr); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); items.push_back(item); } @@ -2624,11 +2624,11 @@ static void TestPool_SameSize() // vmaMakePoolAllocationsLost. Only remaining 2 should be lost. size_t lostCount = 0xDEADC0DE; vmaMakePoolAllocationsLost(g_hAllocator, pool, &lostCount); - assert(lostCount == 2); + TEST(lostCount == 2); // Make another call. Now 0 should be lost. vmaMakePoolAllocationsLost(g_hAllocator, pool, &lostCount); - assert(lostCount == 0); + TEST(lostCount == 0); // Make another call, with null count. Should not crash. vmaMakePoolAllocationsLost(g_hAllocator, pool, nullptr); @@ -2653,7 +2653,7 @@ static void TestPool_SameSize() VmaAllocation alloc = nullptr; res = vmaAllocateMemory(g_hAllocator, &memReq, &allocCreateInfo, &alloc, nullptr); - assert(res == VK_ERROR_OUT_OF_DEVICE_MEMORY && alloc == nullptr); + TEST(res == VK_ERROR_OUT_OF_DEVICE_MEMORY && alloc == nullptr); } vmaDestroyPool(g_hAllocator, pool); @@ -2692,11 +2692,11 @@ static void TestAllocationsInitialization() poolCreateInfo.minBlockCount = 1; // To keep memory alive while pool exists. 
poolCreateInfo.maxBlockCount = 1; res = vmaFindMemoryTypeIndexForBufferInfo(g_hAllocator, &bufInfo, &dummyBufAllocCreateInfo, &poolCreateInfo.memoryTypeIndex); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); VmaAllocationCreateInfo bufAllocCreateInfo = {}; res = vmaCreatePool(g_hAllocator, &poolCreateInfo, &bufAllocCreateInfo.pool); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); // Create one persistently mapped buffer to keep memory of this block mapped, // so that pointer to mapped data will remain (more or less...) valid even @@ -2706,7 +2706,7 @@ static void TestAllocationsInitialization() VkBuffer firstBuf; VmaAllocation firstAlloc; res = vmaCreateBuffer(g_hAllocator, &bufInfo, &bufAllocCreateInfo, &firstBuf, &firstAlloc, nullptr); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); // Test buffers. @@ -2718,13 +2718,13 @@ static void TestAllocationsInitialization() VmaAllocation alloc; VmaAllocationInfo allocInfo; res = vmaCreateBuffer(g_hAllocator, &bufInfo, &bufAllocCreateInfo, &buf, &alloc, &allocInfo); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); void* pMappedData; if(!persistentlyMapped) { res = vmaMapMemory(g_hAllocator, alloc, &pMappedData); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); } else { @@ -2733,7 +2733,7 @@ static void TestAllocationsInitialization() // Validate initialized content bool valid = ValidatePattern(pMappedData, BUF_SIZE, 0xDC); - assert(valid); + TEST(valid); if(!persistentlyMapped) { @@ -2744,7 +2744,7 @@ static void TestAllocationsInitialization() // Validate freed content valid = ValidatePattern(pMappedData, BUF_SIZE, 0xEF); - assert(valid); + TEST(valid); } vmaDestroyBuffer(g_hAllocator, firstBuf, firstAlloc); @@ -2755,7 +2755,7 @@ static void TestPool_Benchmark( PoolTestResult& outResult, const PoolTestConfig& config) { - assert(config.ThreadCount > 0); + TEST(config.ThreadCount > 0); RandomNumberGenerator mainRand{config.RandSeed}; @@ -2788,7 +2788,7 @@ static void 
TestPool_Benchmark( { VkBuffer dummyBuffer; VkResult res = vkCreateBuffer(g_hDevice, &bufferInfo, nullptr, &dummyBuffer); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); VkMemoryRequirements memReq; vkGetBufferMemoryRequirements(g_hDevice, dummyBuffer, &memReq); @@ -2801,7 +2801,7 @@ static void TestPool_Benchmark( { VkImage dummyImage; VkResult res = vkCreateImage(g_hDevice, &imageInfo, nullptr, &dummyImage); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); VkMemoryRequirements memReq; vkGetImageMemoryRequirements(g_hDevice, dummyImage, &memReq); @@ -2825,7 +2825,7 @@ static void TestPool_Benchmark( else if(config.UsesImages()) memoryTypeBits = imageMemoryTypeBits; else - assert(0); + TEST(0); VmaPoolCreateInfo poolCreateInfo = {}; poolCreateInfo.memoryTypeIndex = 0; @@ -2840,7 +2840,7 @@ static void TestPool_Benchmark( VmaPool pool; VkResult res = vmaCreatePool(g_hAllocator, &poolCreateInfo, &pool); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); // Start time measurement - after creating pool and initializing data structures. 
time_point timeBeg = std::chrono::high_resolution_clock::now(); @@ -2899,8 +2899,8 @@ static void TestPool_Benchmark( const AllocationSize& allocSize = config.AllocationSizes[allocSizeIndex]; if(allocSize.BufferSizeMax > 0) { - assert(allocSize.BufferSizeMin > 0); - assert(allocSize.ImageSizeMin == 0 && allocSize.ImageSizeMax == 0); + TEST(allocSize.BufferSizeMin > 0); + TEST(allocSize.ImageSizeMin == 0 && allocSize.ImageSizeMax == 0); if(allocSize.BufferSizeMax == allocSize.BufferSizeMin) item.BufferSize = allocSize.BufferSizeMin; else @@ -2911,7 +2911,7 @@ static void TestPool_Benchmark( } else { - assert(allocSize.ImageSizeMin > 0 && allocSize.ImageSizeMax > 0); + TEST(allocSize.ImageSizeMin > 0 && allocSize.ImageSizeMax > 0); if(allocSize.ImageSizeMax == allocSize.ImageSizeMin) item.ImageSize.width = item.ImageSize.height = allocSize.ImageSizeMax; else @@ -2939,7 +2939,7 @@ static void TestPool_Benchmark( } else { - assert(item.ImageSize.width && item.ImageSize.height); + TEST(item.ImageSize.width && item.ImageSize.height); imageInfo.extent.width = item.ImageSize.width; imageInfo.extent.height = item.ImageSize.height; @@ -2966,7 +2966,7 @@ static void TestPool_Benchmark( // Determine which bufs we want to use in this frame. const size_t usedBufCount = (threadRand.Generate() % (config.UsedItemCountMax - config.UsedItemCountMin) + config.UsedItemCountMin) / config.ThreadCount; - assert(usedBufCount < usedItems.size() + unusedItems.size()); + TEST(usedBufCount < usedItems.size() + unusedItems.size()); // Move some used to unused. while(usedBufCount < usedItems.size()) { @@ -3100,7 +3100,7 @@ static void TestPool_Benchmark( } // Execute frames. 
- assert(config.ThreadCount <= MAXIMUM_WAIT_OBJECTS); + TEST(config.ThreadCount <= MAXIMUM_WAIT_OBJECTS); for(uint32_t frameIndex = 0; frameIndex < config.FrameCount; ++frameIndex) { vmaSetCurrentFrameIndex(g_hAllocator, frameIndex); @@ -3186,11 +3186,11 @@ static void TestMapping() VmaPool pool = nullptr; if(testIndex == TEST_POOL) { - assert(memTypeIndex != UINT32_MAX); + TEST(memTypeIndex != UINT32_MAX); VmaPoolCreateInfo poolInfo = {}; poolInfo.memoryTypeIndex = memTypeIndex; res = vmaCreatePool(g_hAllocator, &poolInfo, &pool); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); } VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; @@ -3213,56 +3213,56 @@ static void TestMapping() { res = vmaCreateBuffer(g_hAllocator, &bufInfo, &allocCreateInfo, &bufferInfos[i].Buffer, &bufferInfos[i].Allocation, &allocInfo); - assert(res == VK_SUCCESS); - assert(allocInfo.pMappedData == nullptr); + TEST(res == VK_SUCCESS); + TEST(allocInfo.pMappedData == nullptr); memTypeIndex = allocInfo.memoryType; } // Map buffer 0. char* data00 = nullptr; res = vmaMapMemory(g_hAllocator, bufferInfos[0].Allocation, (void**)&data00); - assert(res == VK_SUCCESS && data00 != nullptr); + TEST(res == VK_SUCCESS && data00 != nullptr); data00[0xFFFF] = data00[0]; // Map buffer 0 second time. char* data01 = nullptr; res = vmaMapMemory(g_hAllocator, bufferInfos[0].Allocation, (void**)&data01); - assert(res == VK_SUCCESS && data01 == data00); + TEST(res == VK_SUCCESS && data01 == data00); // Map buffer 1. char* data1 = nullptr; res = vmaMapMemory(g_hAllocator, bufferInfos[1].Allocation, (void**)&data1); - assert(res == VK_SUCCESS && data1 != nullptr); - assert(!MemoryRegionsOverlap(data00, (size_t)bufInfo.size, data1, (size_t)bufInfo.size)); + TEST(res == VK_SUCCESS && data1 != nullptr); + TEST(!MemoryRegionsOverlap(data00, (size_t)bufInfo.size, data1, (size_t)bufInfo.size)); data1[0xFFFF] = data1[0]; // Unmap buffer 0 two times. 
vmaUnmapMemory(g_hAllocator, bufferInfos[0].Allocation); vmaUnmapMemory(g_hAllocator, bufferInfos[0].Allocation); vmaGetAllocationInfo(g_hAllocator, bufferInfos[0].Allocation, &allocInfo); - assert(allocInfo.pMappedData == nullptr); + TEST(allocInfo.pMappedData == nullptr); // Unmap buffer 1. vmaUnmapMemory(g_hAllocator, bufferInfos[1].Allocation); vmaGetAllocationInfo(g_hAllocator, bufferInfos[1].Allocation, &allocInfo); - assert(allocInfo.pMappedData == nullptr); + TEST(allocInfo.pMappedData == nullptr); // Create 3rd buffer - persistently mapped. allocCreateInfo.flags |= VMA_ALLOCATION_CREATE_MAPPED_BIT; res = vmaCreateBuffer(g_hAllocator, &bufInfo, &allocCreateInfo, &bufferInfos[2].Buffer, &bufferInfos[2].Allocation, &allocInfo); - assert(res == VK_SUCCESS && allocInfo.pMappedData != nullptr); + TEST(res == VK_SUCCESS && allocInfo.pMappedData != nullptr); // Map buffer 2. char* data2 = nullptr; res = vmaMapMemory(g_hAllocator, bufferInfos[2].Allocation, (void**)&data2); - assert(res == VK_SUCCESS && data2 == allocInfo.pMappedData); + TEST(res == VK_SUCCESS && data2 == allocInfo.pMappedData); data2[0xFFFF] = data2[0]; // Unmap buffer 2. vmaUnmapMemory(g_hAllocator, bufferInfos[2].Allocation); vmaGetAllocationInfo(g_hAllocator, bufferInfos[2].Allocation, &allocInfo); - assert(allocInfo.pMappedData == data2); + TEST(allocInfo.pMappedData == data2); // Destroy all buffers. 
for(size_t i = 3; i--; ) @@ -3295,11 +3295,11 @@ static void TestMappingMultithreaded() VmaPool pool = nullptr; if(testIndex == TEST_POOL) { - assert(memTypeIndex != UINT32_MAX); + TEST(memTypeIndex != UINT32_MAX); VmaPoolCreateInfo poolInfo = {}; poolInfo.memoryTypeIndex = memTypeIndex; res = vmaCreatePool(g_hAllocator, &poolInfo, &pool); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); } VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; @@ -3350,7 +3350,7 @@ static void TestMappingMultithreaded() VmaAllocationInfo allocInfo; VkResult res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &localAllocCreateInfo, &bufInfo.Buffer, &bufInfo.Allocation, &allocInfo); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); if(memTypeIndex == UINT32_MAX) memTypeIndex = allocInfo.memoryType; @@ -3360,28 +3360,28 @@ static void TestMappingMultithreaded() if(mode == MODE::PERSISTENTLY_MAPPED) { data = (char*)allocInfo.pMappedData; - assert(data != nullptr); + TEST(data != nullptr); } else if(mode == MODE::MAP_FOR_MOMENT || mode == MODE::MAP_FOR_LONGER || mode == MODE::MAP_TWO_TIMES) { - assert(data == nullptr); + TEST(data == nullptr); res = vmaMapMemory(g_hAllocator, bufInfo.Allocation, (void**)&data); - assert(res == VK_SUCCESS && data != nullptr); + TEST(res == VK_SUCCESS && data != nullptr); if(mode == MODE::MAP_TWO_TIMES) { char* data2 = nullptr; res = vmaMapMemory(g_hAllocator, bufInfo.Allocation, (void**)&data2); - assert(res == VK_SUCCESS && data2 == data); + TEST(res == VK_SUCCESS && data2 == data); } } else if(mode == MODE::DONT_MAP) { - assert(allocInfo.pMappedData == nullptr); + TEST(allocInfo.pMappedData == nullptr); } else - assert(0); + TEST(0); // Test if reading and writing from the beginning and end of mapped memory doesn't crash. 
if(data) @@ -3394,9 +3394,9 @@ static void TestMappingMultithreaded() VmaAllocationInfo allocInfo; vmaGetAllocationInfo(g_hAllocator, bufInfo.Allocation, &allocInfo); if(mode == MODE::MAP_FOR_MOMENT) - assert(allocInfo.pMappedData == nullptr); + TEST(allocInfo.pMappedData == nullptr); else - assert(allocInfo.pMappedData == data); + TEST(allocInfo.pMappedData == data); } switch(rand.Generate() % 3) @@ -3420,7 +3420,7 @@ static void TestMappingMultithreaded() VmaAllocationInfo allocInfo; vmaGetAllocationInfo(g_hAllocator, bufInfos[bufferIndex].Allocation, &allocInfo); - assert(allocInfo.pMappedData == nullptr); + TEST(allocInfo.pMappedData == nullptr); } vmaDestroyBuffer(g_hAllocator, bufInfos[bufferIndex].Buffer, bufInfos[bufferIndex].Allocation); @@ -3578,7 +3578,7 @@ static void PerformCustomMainTest(FILE* file) Result result{}; VkResult res = MainTest(result, config); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); WriteMainTestResult(file, "Foo", "CustomTest", config, result); } @@ -3868,7 +3868,7 @@ static void PerformMainTests(FILE* file) Result result{}; VkResult res = MainTest(result, config); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); if(file) { WriteMainTestResult(file, CODE_DESCRIPTION, testDescription, config, result); @@ -4123,7 +4123,7 @@ static void BasicTestBuddyAllocator() VmaPoolCreateInfo poolCreateInfo = {}; VkResult res = vmaFindMemoryTypeIndexForBufferInfo(g_hAllocator, &sampleBufCreateInfo, &sampleAllocCreateInfo, &poolCreateInfo.memoryTypeIndex); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); // Deliberately adding 1023 to test usable size smaller than memory block size. 
poolCreateInfo.blockSize = 1024 * 1024 + 1023; @@ -4132,7 +4132,7 @@ static void BasicTestBuddyAllocator() VmaPool pool = nullptr; res = vmaCreatePool(g_hAllocator, &poolCreateInfo, &pool); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); VkBufferCreateInfo bufCreateInfo = sampleBufCreateInfo; @@ -4146,26 +4146,26 @@ static void BasicTestBuddyAllocator() bufCreateInfo.size = 1024 * 256; res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); bufInfo.push_back(newBufInfo); bufCreateInfo.size = 1024 * 512; res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); bufInfo.push_back(newBufInfo); bufCreateInfo.size = 1024 * 128; res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); bufInfo.push_back(newBufInfo); // Test very small allocation, smaller than minimum node size. bufCreateInfo.size = 1; res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); bufInfo.push_back(newBufInfo); // Test some small allocation with alignment requirement. 
@@ -4178,8 +4178,8 @@ static void BasicTestBuddyAllocator() newBufInfo.Buffer = VK_NULL_HANDLE; res = vmaAllocateMemory(g_hAllocator, &memReq, &allocCreateInfo, &newBufInfo.Allocation, &allocInfo); - assert(res == VK_SUCCESS); - assert(allocInfo.offset % memReq.alignment == 0); + TEST(res == VK_SUCCESS); + TEST(allocInfo.offset % memReq.alignment == 0); bufInfo.push_back(newBufInfo); } @@ -4195,7 +4195,7 @@ static void BasicTestBuddyAllocator() bufCreateInfo.size = 1024 * (rand.Generate() % 32 + 1); res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); bufInfo.push_back(newBufInfo); } @@ -4228,7 +4228,7 @@ static void BasicTestAllocatePages() VmaPoolCreateInfo poolCreateInfo = {}; VkResult res = vmaFindMemoryTypeIndexForBufferInfo(g_hAllocator, &sampleBufCreateInfo, &sampleAllocCreateInfo, &poolCreateInfo.memoryTypeIndex); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); // 1 block of 1 MB. poolCreateInfo.blockSize = 1024 * 1024; @@ -4237,7 +4237,7 @@ static void BasicTestAllocatePages() // Create pool. VmaPool pool = nullptr; res = vmaCreatePool(g_hAllocator, &poolCreateInfo, &pool); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); // Make 100 allocations of 4 KB - they should fit into the pool. 
VkMemoryRequirements memReq; @@ -4254,10 +4254,10 @@ static void BasicTestAllocatePages() std::vector alloc{allocCount}; std::vector allocInfo{allocCount}; res = vmaAllocateMemoryPages(g_hAllocator, &memReq, &allocCreateInfo, allocCount, alloc.data(), allocInfo.data()); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); for(uint32_t i = 0; i < allocCount; ++i) { - assert(alloc[i] != VK_NULL_HANDLE && + TEST(alloc[i] != VK_NULL_HANDLE && allocInfo[i].pMappedData != nullptr && allocInfo[i].deviceMemory == allocInfo[0].deviceMemory && allocInfo[i].memoryType == allocInfo[0].memoryType); @@ -4272,14 +4272,14 @@ static void BasicTestAllocatePages() // Also test optional allocationInfo = null. memReq.size = 100 * 1024; res = vmaAllocateMemoryPages(g_hAllocator, &memReq, &allocCreateInfo, allocCount, alloc.data(), nullptr); - assert(res != VK_SUCCESS); - assert(std::find_if(alloc.begin(), alloc.end(), [](VmaAllocation alloc){ return alloc != VK_NULL_HANDLE; }) == alloc.end()); + TEST(res != VK_SUCCESS); + TEST(std::find_if(alloc.begin(), alloc.end(), [](VmaAllocation alloc){ return alloc != VK_NULL_HANDLE; }) == alloc.end()); // Make 100 allocations of 4 KB, but with required alignment of 128 KB. This should also fail. memReq.size = 4 * 1024; memReq.alignment = 128 * 1024; res = vmaAllocateMemoryPages(g_hAllocator, &memReq, &allocCreateInfo, allocCount, alloc.data(), allocInfo.data()); - assert(res != VK_SUCCESS); + TEST(res != VK_SUCCESS); // Make 100 dedicated allocations of 4 KB. 
memReq.alignment = 4 * 1024; @@ -4289,16 +4289,16 @@ static void BasicTestAllocatePages() dedicatedAllocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY; dedicatedAllocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT | VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT; res = vmaAllocateMemoryPages(g_hAllocator, &memReq, &dedicatedAllocCreateInfo, allocCount, alloc.data(), allocInfo.data()); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); for(uint32_t i = 0; i < allocCount; ++i) { - assert(alloc[i] != VK_NULL_HANDLE && + TEST(alloc[i] != VK_NULL_HANDLE && allocInfo[i].pMappedData != nullptr && allocInfo[i].memoryType == allocInfo[0].memoryType && allocInfo[i].offset == 0); if(i > 0) { - assert(allocInfo[i].deviceMemory != allocInfo[0].deviceMemory); + TEST(allocInfo[i].deviceMemory != allocInfo[0].deviceMemory); } } diff --git a/src/VulkanSample.cpp b/src/VulkanSample.cpp index b606433..7c78469 100644 --- a/src/VulkanSample.cpp +++ b/src/VulkanSample.cpp @@ -1767,10 +1767,24 @@ static LRESULT WINAPI WndProc(HWND hWnd, UINT msg, WPARAM wParam, LPARAM lParam) PostMessage(hWnd, WM_CLOSE, 0, 0); break; case 'T': - Test(); + try + { + Test(); + } + catch(const std::exception& ex) + { + printf("ERROR: %s\n", ex.what()); + } break; case 'S': - TestSparseBinding(); + try + { + TestSparseBinding(); + } + catch(const std::exception& ex) + { + printf("ERROR: %s\n", ex.what()); + } break; } return 0; From 978fcf54ab60bfa2e155ce441ed9ee177fa439d3 Mon Sep 17 00:00:00 2001 From: Adam Sawicki Date: Wed, 5 Dec 2018 14:38:48 +0100 Subject: [PATCH 06/10] Testing environment: Added class StagingBufferCollection, functions UploadGpuData, ValidateGpuData, TestGpuData, in preparation for testing defragmentation of GPU memory. 
# Conflicts: # src/Tests.cpp # src/VulkanSample.cpp --- src/Tests.cpp | 333 +++++++++++++++++++++++++++++++++++++++++++ src/VulkanSample.cpp | 16 +-- 2 files changed, 336 insertions(+), 13 deletions(-) diff --git a/src/Tests.cpp b/src/Tests.cpp index 3a5c78b..2d0efdf 100644 --- a/src/Tests.cpp +++ b/src/Tests.cpp @@ -9,6 +9,10 @@ static const char* CODE_DESCRIPTION = "Foo"; +extern VkCommandBuffer g_hTemporaryCommandBuffer; +void BeginSingleTimeCommands(); +void EndSingleTimeCommands(); + enum CONFIG_TYPE { CONFIG_TYPE_MINIMUM, CONFIG_TYPE_SMALL, @@ -656,6 +660,299 @@ struct AllocInfo }; }; +class StagingBufferCollection +{ +public: + StagingBufferCollection() { } + ~StagingBufferCollection(); + // Returns false if maximum total size of buffers would be exceeded. + bool AcquireBuffer(VkDeviceSize size, VkBuffer& outBuffer, void*& outMappedPtr); + void ReleaseAllBuffers(); + +private: + static const VkDeviceSize MAX_TOTAL_SIZE = 256ull * 1024 * 1024; + struct BufInfo + { + VmaAllocation Allocation = VK_NULL_HANDLE; + VkBuffer Buffer = VK_NULL_HANDLE; + VkDeviceSize Size = VK_WHOLE_SIZE; + void* MappedPtr = nullptr; + bool Used = false; + }; + std::vector m_Bufs; + // Including both used and unused. + VkDeviceSize m_TotalSize = 0; +}; + +StagingBufferCollection::~StagingBufferCollection() +{ + for(size_t i = m_Bufs.size(); i--; ) + { + vmaDestroyBuffer(g_hAllocator, m_Bufs[i].Buffer, m_Bufs[i].Allocation); + } +} + +bool StagingBufferCollection::AcquireBuffer(VkDeviceSize size, VkBuffer& outBuffer, void*& outMappedPtr) +{ + assert(size <= MAX_TOTAL_SIZE); + + // Try to find existing unused buffer with best size. 
+ size_t bestIndex = SIZE_MAX; + for(size_t i = 0, count = m_Bufs.size(); i < count; ++i) + { + BufInfo& currBufInfo = m_Bufs[i]; + if(!currBufInfo.Used && currBufInfo.Size >= size && + (bestIndex == SIZE_MAX || currBufInfo.Size < m_Bufs[bestIndex].Size)) + { + bestIndex = i; + } + } + + if(bestIndex != SIZE_MAX) + { + m_Bufs[bestIndex].Used = true; + outBuffer = m_Bufs[bestIndex].Buffer; + outMappedPtr = m_Bufs[bestIndex].MappedPtr; + return true; + } + + // Allocate new buffer with requested size. + if(m_TotalSize + size <= MAX_TOTAL_SIZE) + { + BufInfo bufInfo; + bufInfo.Size = size; + bufInfo.Used = true; + + VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; + bufCreateInfo.size = size; + bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; + + VmaAllocationCreateInfo allocCreateInfo = {}; + allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY; + allocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT; + + VmaAllocationInfo allocInfo; + VkResult res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &bufInfo.Buffer, &bufInfo.Allocation, &allocInfo); + bufInfo.MappedPtr = allocInfo.pMappedData; + TEST(res == VK_SUCCESS && bufInfo.MappedPtr); + + outBuffer = bufInfo.Buffer; + outMappedPtr = bufInfo.MappedPtr; + + m_Bufs.push_back(std::move(bufInfo)); + + m_TotalSize += size; + + return true; + } + + // There are some unused but smaller buffers: Free them and try again. 
+ bool hasUnused = false; + for(size_t i = 0, count = m_Bufs.size(); i < count; ++i) + { + if(!m_Bufs[i].Used) + { + hasUnused = true; + break; + } + } + if(hasUnused) + { + for(size_t i = m_Bufs.size(); i--; ) + { + if(!m_Bufs[i].Used) + { + m_TotalSize -= m_Bufs[i].Size; + vmaDestroyBuffer(g_hAllocator, m_Bufs[i].Buffer, m_Bufs[i].Allocation); + m_Bufs.erase(m_Bufs.begin() + i); + } + } + + return AcquireBuffer(size, outBuffer, outMappedPtr); + } + + return false; +} + +void StagingBufferCollection::ReleaseAllBuffers() +{ + for(size_t i = 0, count = m_Bufs.size(); i < count; ++i) + { + m_Bufs[i].Used = false; + } +} + +static void UploadGpuData(const AllocInfo* allocInfo, size_t allocInfoCount) +{ + StagingBufferCollection stagingBufs; + + bool cmdBufferStarted = false; + for(size_t allocInfoIndex = 0; allocInfoIndex < allocInfoCount; ++allocInfoIndex) + { + const AllocInfo& currAllocInfo = allocInfo[allocInfoIndex]; + if(currAllocInfo.m_Buffer) + { + const VkDeviceSize size = currAllocInfo.m_BufferInfo.size; + + VkBuffer stagingBuf = VK_NULL_HANDLE; + void* stagingBufMappedPtr = nullptr; + if(!stagingBufs.AcquireBuffer(size, stagingBuf, stagingBufMappedPtr)) + { + TEST(cmdBufferStarted); + EndSingleTimeCommands(); + stagingBufs.ReleaseAllBuffers(); + cmdBufferStarted = false; + + bool ok = stagingBufs.AcquireBuffer(size, stagingBuf, stagingBufMappedPtr); + TEST(ok); + } + + // Fill staging buffer. + { + assert(size % sizeof(uint32_t) == 0); + uint32_t* stagingValPtr = (uint32_t*)stagingBufMappedPtr; + uint32_t val = currAllocInfo.m_StartValue; + for(size_t i = 0; i < size / sizeof(uint32_t); ++i) + { + *stagingValPtr = val; + ++stagingValPtr; + ++val; + } + } + + // Issue copy command from staging buffer to destination buffer. 
+ if(!cmdBufferStarted) + { + cmdBufferStarted = true; + BeginSingleTimeCommands(); + } + + VkBufferCopy copy = {}; + copy.srcOffset = 0; + copy.dstOffset = 0; + copy.size = size; + vkCmdCopyBuffer(g_hTemporaryCommandBuffer, stagingBuf, currAllocInfo.m_Buffer, 1, ©); + } + else + { + TEST(0 && "Images not currently supported."); + } + } + + if(cmdBufferStarted) + { + EndSingleTimeCommands(); + stagingBufs.ReleaseAllBuffers(); + } +} + +static void ValidateGpuData(const AllocInfo* allocInfo, size_t allocInfoCount) +{ + StagingBufferCollection stagingBufs; + + bool cmdBufferStarted = false; + size_t validateAllocIndexOffset = 0; + std::vector validateStagingBuffers; + for(size_t allocInfoIndex = 0; allocInfoIndex < allocInfoCount; ++allocInfoIndex) + { + const AllocInfo& currAllocInfo = allocInfo[allocInfoIndex]; + if(currAllocInfo.m_Buffer) + { + const VkDeviceSize size = currAllocInfo.m_BufferInfo.size; + + VkBuffer stagingBuf = VK_NULL_HANDLE; + void* stagingBufMappedPtr = nullptr; + if(!stagingBufs.AcquireBuffer(size, stagingBuf, stagingBufMappedPtr)) + { + TEST(cmdBufferStarted); + EndSingleTimeCommands(); + cmdBufferStarted = false; + + for(size_t validateIndex = 0; + validateIndex < validateStagingBuffers.size(); + ++validateIndex) + { + const size_t validateAllocIndex = validateIndex + validateAllocIndexOffset; + const VkDeviceSize validateSize = allocInfo[validateAllocIndex].m_BufferInfo.size; + TEST(validateSize % sizeof(uint32_t) == 0); + const uint32_t* stagingValPtr = (const uint32_t*)validateStagingBuffers[validateIndex]; + uint32_t val = allocInfo[validateAllocIndex].m_StartValue; + bool valid = true; + for(size_t i = 0; i < validateSize / sizeof(uint32_t); ++i) + { + if(*stagingValPtr != val) + { + valid = false; + break; + } + ++stagingValPtr; + ++val; + } + TEST(valid); + } + + stagingBufs.ReleaseAllBuffers(); + + validateAllocIndexOffset = allocInfoIndex; + validateStagingBuffers.clear(); + + bool ok = stagingBufs.AcquireBuffer(size, stagingBuf, 
stagingBufMappedPtr); + TEST(ok); + } + + // Issue copy command from staging buffer to destination buffer. + if(!cmdBufferStarted) + { + cmdBufferStarted = true; + BeginSingleTimeCommands(); + } + + VkBufferCopy copy = {}; + copy.srcOffset = 0; + copy.dstOffset = 0; + copy.size = size; + vkCmdCopyBuffer(g_hTemporaryCommandBuffer, currAllocInfo.m_Buffer, stagingBuf, 1, ©); + + // Sava mapped pointer for later validation. + validateStagingBuffers.push_back(stagingBufMappedPtr); + } + else + { + TEST(0 && "Images not currently supported."); + } + } + + if(cmdBufferStarted) + { + EndSingleTimeCommands(); + + for(size_t validateIndex = 0; + validateIndex < validateStagingBuffers.size(); + ++validateIndex) + { + const size_t validateAllocIndex = validateIndex + validateAllocIndexOffset; + const VkDeviceSize validateSize = allocInfo[validateAllocIndex].m_BufferInfo.size; + TEST(validateSize % sizeof(uint32_t) == 0); + const uint32_t* stagingValPtr = (const uint32_t*)validateStagingBuffers[validateIndex]; + uint32_t val = allocInfo[validateAllocIndex].m_StartValue; + bool valid = true; + for(size_t i = 0; i < validateSize / sizeof(uint32_t); ++i) + { + if(*stagingValPtr != val) + { + valid = false; + break; + } + ++stagingValPtr; + ++val; + } + TEST(valid); + } + + stagingBufs.ReleaseAllBuffers(); + } +} + static void GetMemReq(VmaAllocationCreateInfo& outMemReq) { outMemReq = {}; @@ -4515,6 +4812,41 @@ static void BasicTestAllocatePages() vmaDestroyPool(g_hAllocator, pool); } +// Test the testing environment. 
+static void TestGpuData() +{ + RandomNumberGenerator rand = { 53434 }; + + std::vector allocInfo; + + for(size_t i = 0; i < 100; ++i) + { + AllocInfo info = {}; + + info.m_BufferInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; + info.m_BufferInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | + VK_BUFFER_USAGE_TRANSFER_SRC_BIT | + VK_BUFFER_USAGE_VERTEX_BUFFER_BIT; + info.m_BufferInfo.size = 1024 * 1024 * (rand.Generate() % 9 + 1); + + VmaAllocationCreateInfo allocCreateInfo = {}; + allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY; + + VkResult res = vmaCreateBuffer(g_hAllocator, &info.m_BufferInfo, &allocCreateInfo, &info.m_Buffer, &info.m_Allocation, nullptr); + TEST(res == VK_SUCCESS); + + info.m_StartValue = rand.Generate(); + + allocInfo.push_back(std::move(info)); + } + + UploadGpuData(allocInfo.data(), allocInfo.size()); + + ValidateGpuData(allocInfo.data(), allocInfo.size()); + + DestroyAllAllocations(allocInfo); +} + void Test() { wprintf(L"TESTING:\n"); @@ -4532,6 +4864,7 @@ void Test() // # Simple tests TestBasics(); + //TestGpuData(); // Not calling this because it's just testing the testing environment. 
#if VMA_DEBUG_MARGIN TestDebugMargin(); #else diff --git a/src/VulkanSample.cpp b/src/VulkanSample.cpp index 0da5be2..0e5cae6 100644 --- a/src/VulkanSample.cpp +++ b/src/VulkanSample.cpp @@ -91,7 +91,7 @@ static VkDebugReportCallbackEXT g_hCallback; static VkQueue g_hGraphicsQueue; VkQueue g_hSparseBindingQueue; -static VkCommandBuffer g_hTemporaryCommandBuffer; +VkCommandBuffer g_hTemporaryCommandBuffer; static VkPipelineLayout g_hPipelineLayout; static VkRenderPass g_hRenderPass; @@ -130,14 +130,14 @@ static void CustomCpuFree(void* pUserData, void* pMemory) _aligned_free(pMemory); } -static void BeginSingleTimeCommands() +void BeginSingleTimeCommands() { VkCommandBufferBeginInfo cmdBufBeginInfo = { VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO }; cmdBufBeginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT; ERR_GUARD_VULKAN( vkBeginCommandBuffer(g_hTemporaryCommandBuffer, &cmdBufBeginInfo) ); } -static void EndSingleTimeCommands() +void EndSingleTimeCommands() { ERR_GUARD_VULKAN( vkEndCommandBuffer(g_hTemporaryCommandBuffer) ); @@ -1785,16 +1785,6 @@ static LRESULT WINAPI WndProc(HWND hWnd, UINT msg, WPARAM wParam, LPARAM lParam) printf("ERROR: %s\n", ex.what()); } break; - case 'S': - try - { - TestSparseBinding(); - } - catch(const std::exception& ex) - { - printf("ERROR: %s\n", ex.what()); - } - break; } return 0; From da6c19423d16fdf30c139ea9a7e431458ae033be Mon Sep 17 00:00:00 2001 From: Adam Sawicki Date: Wed, 5 Dec 2018 17:34:34 +0100 Subject: [PATCH 07/10] Wrote test for sparse image binding with testing actual content - function BaseImage::TestContent. It uses vkCopyBufferToImage and then a compute shader to read back pixels of the image. 
--- bin/SparseBindingTest.comp.spv | Bin 0 -> 1912 bytes src/Shaders/CompileShaders.bat | 1 + src/Shaders/SparseBindingTest.comp | 44 ++++ src/SparseBindingTest.cpp | 370 +++++++++++++++++++++++++++-- src/VulkanSample.cpp | 22 +- 5 files changed, 412 insertions(+), 25 deletions(-) create mode 100644 bin/SparseBindingTest.comp.spv create mode 100644 src/Shaders/SparseBindingTest.comp diff --git a/bin/SparseBindingTest.comp.spv b/bin/SparseBindingTest.comp.spv new file mode 100644 index 0000000000000000000000000000000000000000..d56e0a95ff61575b068de9b8c42283b0ed9e06ae GIT binary patch literal 1912 zcmZ9LSx;0!5Jnq@Rg^^pL<9#=QBm9g7X;aKKv5CY_%<0}M9jb#28<>q`k;Tz=zsFX z#B+w*W~7t4{nc01wRH7ORS(vOP!;M!LwFeuR$His385*}mG{ij>e5JQZ)0S1?3y0U zAyszNrv;x{_8Me6x3#0;IMNC)*M{g9CB@ll^dAs4p)q8ZvNJ2Q*;4*}Za24=&z9ci zHuAgK;@Z3X`d%pzyXL>xwVZF{{Krq)QsF3gZT}6~>dF{Z{Z+28pXYelr&*F!OOMvJ zS;%Gz#kE{vapyyEJ-4@2+*zD2+iSu__Vr?+xLej!;R^fK_GZXXMH9FVv9AT9ncFpZ zu;Fu1kGG@0c?bU44)lI>??St^81rRh; z+SBL_>~f0!N6I`ORxgpvFV~mUuI{;G?8dOiiA1*_xnXqkwjeI^wz3)VdF^gqF_O+MPYz%gDo*xZ%&G`eRx@jB10ZY@LX>dwj77ufAzzMI!`iCw#A zD_7Keh&PNuKhIxIoaF39-PyRz=TBk0csJr3h%5GT3#{FEci62*Kke@KkPyDisn>z- zBPWrlKLC4|QC|W(+i~8ZaqZRRef@^X zL|e?a05+ff#e9ok`Izq!SgtbPV|4kL?+IAW|0Cvm3O1*& out, const char* fileName); //////////////////////////////////////////////////////////////////////////////// // Class definitions @@ -24,10 +28,17 @@ public: virtual void Init(RandomNumberGenerator& rand) = 0; virtual ~BaseImage(); + const VkImageCreateInfo& GetCreateInfo() const { return m_CreateInfo; } + + void TestContent(RandomNumberGenerator& rand); + protected: + VkImageCreateInfo m_CreateInfo = {}; VkImage m_Image = VK_NULL_HANDLE; - void FillImageCreateInfo(VkImageCreateInfo& outInfo, RandomNumberGenerator& rand); + void FillImageCreateInfo(RandomNumberGenerator& rand); + void UploadContent(); + void ValidateContent(RandomNumberGenerator& rand); }; class TraditionalImage : public BaseImage @@ -61,25 +72,324 @@ BaseImage::~BaseImage() } } -void 
BaseImage::FillImageCreateInfo(VkImageCreateInfo& outInfo, RandomNumberGenerator& rand) +void BaseImage::TestContent(RandomNumberGenerator& rand) +{ + printf("Validating content of %u x %u texture...\n", + m_CreateInfo.extent.width, m_CreateInfo.extent.height); + UploadContent(); + ValidateContent(rand); +} + +void BaseImage::FillImageCreateInfo(RandomNumberGenerator& rand) { constexpr uint32_t imageSizeMin = 8; constexpr uint32_t imageSizeMax = 2048; - ZeroMemory(&outInfo, sizeof(outInfo)); - outInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; - outInfo.imageType = VK_IMAGE_TYPE_2D; - outInfo.extent.width = rand.Generate() % (imageSizeMax - imageSizeMin) + imageSizeMin; - outInfo.extent.height = rand.Generate() % (imageSizeMax - imageSizeMin) + imageSizeMin; - outInfo.extent.depth = 1; - outInfo.mipLevels = 1; // TODO ? - outInfo.arrayLayers = 1; - outInfo.format = VK_FORMAT_R8G8B8A8_UNORM; - outInfo.tiling = VK_IMAGE_TILING_OPTIMAL; - outInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; - outInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; - outInfo.samples = VK_SAMPLE_COUNT_1_BIT; - outInfo.flags = 0; + ZeroMemory(&m_CreateInfo, sizeof(m_CreateInfo)); + m_CreateInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; + m_CreateInfo.imageType = VK_IMAGE_TYPE_2D; + m_CreateInfo.extent.width = rand.Generate() % (imageSizeMax - imageSizeMin) + imageSizeMin; + m_CreateInfo.extent.height = rand.Generate() % (imageSizeMax - imageSizeMin) + imageSizeMin; + m_CreateInfo.extent.depth = 1; + m_CreateInfo.mipLevels = 1; // TODO ? 
+ m_CreateInfo.arrayLayers = 1; + m_CreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM; + m_CreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL; + m_CreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; + m_CreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; + m_CreateInfo.samples = VK_SAMPLE_COUNT_1_BIT; + m_CreateInfo.flags = 0; +} + +void BaseImage::UploadContent() +{ + VkBufferCreateInfo srcBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; + srcBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT; + srcBufCreateInfo.size = 4 * m_CreateInfo.extent.width * m_CreateInfo.extent.height; + + VmaAllocationCreateInfo srcBufAllocCreateInfo = {}; + srcBufAllocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY; + srcBufAllocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT; + + VkBuffer srcBuf = nullptr; + VmaAllocation srcBufAlloc = nullptr; + VmaAllocationInfo srcAllocInfo = {}; + TEST( vmaCreateBuffer(g_hAllocator, &srcBufCreateInfo, &srcBufAllocCreateInfo, &srcBuf, &srcBufAlloc, &srcAllocInfo) == VK_SUCCESS ); + + // Fill texels with: r = x % 255, g = u % 255, b = 13, a = 25 + uint32_t* srcBufPtr = (uint32_t*)srcAllocInfo.pMappedData; + for(uint32_t y = 0, sizeY = m_CreateInfo.extent.height; y < sizeY; ++y) + { + for(uint32_t x = 0, sizeX = m_CreateInfo.extent.width; x < sizeX; ++x, ++srcBufPtr) + { + const uint8_t r = (uint8_t)x; + const uint8_t g = (uint8_t)y; + const uint8_t b = 13; + const uint8_t a = 25; + *srcBufPtr = (uint32_t)r << 24 | (uint32_t)g << 16 | + (uint32_t)b << 8 | (uint32_t)a; + } + } + + BeginSingleTimeCommands(); + + // Barrier undefined to transfer dst. 
+ { + VkImageMemoryBarrier barrier = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER }; + barrier.srcAccessMask = 0; + barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT; + barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED; + barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL; + barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; + barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; + barrier.image = m_Image; + barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; + barrier.subresourceRange.baseArrayLayer = 0; + barrier.subresourceRange.baseMipLevel = 0; + barrier.subresourceRange.layerCount = 1; + barrier.subresourceRange.levelCount = 1; + + vkCmdPipelineBarrier(g_hTemporaryCommandBuffer, + VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, // srcStageMask + VK_PIPELINE_STAGE_TRANSFER_BIT, // dstStageMask + 0, // dependencyFlags + 0, nullptr, // memoryBarriers + 0, nullptr, // bufferMemoryBarriers + 1, &barrier); // imageMemoryBarriers + } + + // CopyBufferToImage + { + VkBufferImageCopy region = {}; + region.bufferOffset = 0; + region.bufferRowLength = 0; // Zeros mean tightly packed. + region.bufferImageHeight = 0; // Zeros mean tightly packed. + region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; + region.imageSubresource.mipLevel = 0; + region.imageSubresource.baseArrayLayer = 0; + region.imageSubresource.layerCount = 1; + region.imageOffset = { 0, 0, 0 }; + region.imageExtent = m_CreateInfo.extent; + vkCmdCopyBufferToImage(g_hTemporaryCommandBuffer, srcBuf, m_Image, + VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, ®ion); + } + + // Barrier transfer dst to fragment shader read only. 
+ { + VkImageMemoryBarrier barrier = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER }; + barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT; + barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT; + barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL; + barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; + barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; + barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; + barrier.image = m_Image; + barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; + barrier.subresourceRange.baseArrayLayer = 0; + barrier.subresourceRange.baseMipLevel = 0; + barrier.subresourceRange.layerCount = 1; + barrier.subresourceRange.levelCount = 1; + + vkCmdPipelineBarrier(g_hTemporaryCommandBuffer, + VK_PIPELINE_STAGE_TRANSFER_BIT, // srcStageMask + VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, // dstStageMask + 0, // dependencyFlags + 0, nullptr, // memoryBarriers + 0, nullptr, // bufferMemoryBarriers + 1, &barrier); // imageMemoryBarriers + } + + EndSingleTimeCommands(); + + vmaDestroyBuffer(g_hAllocator, srcBuf, srcBufAlloc); +} + +void BaseImage::ValidateContent(RandomNumberGenerator& rand) +{ + /* + dstBuf has following layout: + For each of texels to be sampled, [0..valueCount): + struct { + in uint32_t pixelX; + in uint32_t pixelY; + out uint32_t pixelColor; + } + */ + + const uint32_t valueCount = 32; + + VkBufferCreateInfo dstBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; + dstBufCreateInfo.usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT; + dstBufCreateInfo.size = valueCount * sizeof(uint32_t) * 3; + + VmaAllocationCreateInfo dstBufAllocCreateInfo = {}; + dstBufAllocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT; + dstBufAllocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_TO_CPU; + + VkBuffer dstBuf = nullptr; + VmaAllocation dstBufAlloc = nullptr; + VmaAllocationInfo dstBufAllocInfo = {}; + TEST( vmaCreateBuffer(g_hAllocator, &dstBufCreateInfo, &dstBufAllocCreateInfo, &dstBuf, &dstBufAlloc, &dstBufAllocInfo) == 
VK_SUCCESS ); + + // Fill dstBuf input data. + { + uint32_t* dstBufContent = (uint32_t*)dstBufAllocInfo.pMappedData; + for(uint32_t i = 0; i < valueCount; ++i) + { + const uint32_t x = rand.Generate() % m_CreateInfo.extent.width; + const uint32_t y = rand.Generate() % m_CreateInfo.extent.height; + dstBufContent[i * 3 ] = x; + dstBufContent[i * 3 + 1] = y; + dstBufContent[i * 3 + 2] = 0; + } + } + + VkSamplerCreateInfo samplerCreateInfo = { VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO }; + samplerCreateInfo.magFilter = VK_FILTER_NEAREST; + samplerCreateInfo.minFilter = VK_FILTER_NEAREST; + samplerCreateInfo.mipmapMode = VK_SAMPLER_MIPMAP_MODE_NEAREST; + samplerCreateInfo.addressModeU = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; + samplerCreateInfo.addressModeV = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; + samplerCreateInfo.addressModeW = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; + samplerCreateInfo.unnormalizedCoordinates = VK_TRUE; + + VkSampler sampler = nullptr; + TEST( vkCreateSampler( g_hDevice, &samplerCreateInfo, nullptr, &sampler) == VK_SUCCESS ); + + VkDescriptorSetLayoutBinding bindings[2] = {}; + bindings[0].binding = 0; + bindings[0].descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; + bindings[0].descriptorCount = 1; + bindings[0].stageFlags = VK_SHADER_STAGE_COMPUTE_BIT; + bindings[0].pImmutableSamplers = &sampler; + bindings[1].binding = 1; + bindings[1].descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER; + bindings[1].descriptorCount = 1; + bindings[1].stageFlags = VK_SHADER_STAGE_COMPUTE_BIT; + + VkDescriptorSetLayoutCreateInfo descSetLayoutCreateInfo = { VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO }; + descSetLayoutCreateInfo.bindingCount = 2; + descSetLayoutCreateInfo.pBindings = bindings; + + VkDescriptorSetLayout descSetLayout = nullptr; + TEST( vkCreateDescriptorSetLayout(g_hDevice, &descSetLayoutCreateInfo, nullptr, &descSetLayout) == VK_SUCCESS ); + + VkPipelineLayoutCreateInfo pipelineLayoutCreateInfo = { 
VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO }; + pipelineLayoutCreateInfo.setLayoutCount = 1; + pipelineLayoutCreateInfo.pSetLayouts = &descSetLayout; + + VkPipelineLayout pipelineLayout = nullptr; + TEST( vkCreatePipelineLayout(g_hDevice, &pipelineLayoutCreateInfo, nullptr, &pipelineLayout) == VK_SUCCESS ); + + std::vector shaderCode; + LoadShader(shaderCode, "SparseBindingTest.comp.spv"); + + VkShaderModuleCreateInfo shaderModuleCreateInfo = { VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO }; + shaderModuleCreateInfo.codeSize = shaderCode.size(); + shaderModuleCreateInfo.pCode = (const uint32_t*)shaderCode.data(); + + VkShaderModule shaderModule = nullptr; + TEST( vkCreateShaderModule(g_hDevice, &shaderModuleCreateInfo, nullptr, &shaderModule) == VK_SUCCESS ); + + VkComputePipelineCreateInfo pipelineCreateInfo = { VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO }; + pipelineCreateInfo.stage.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO; + pipelineCreateInfo.stage.stage = VK_SHADER_STAGE_COMPUTE_BIT; + pipelineCreateInfo.stage.module = shaderModule; + pipelineCreateInfo.stage.pName = "main"; + pipelineCreateInfo.layout = pipelineLayout; + + VkPipeline pipeline = nullptr; + TEST( vkCreateComputePipelines(g_hDevice, nullptr, 1, &pipelineCreateInfo, nullptr, &pipeline) == VK_SUCCESS ); + + VkDescriptorPoolSize poolSizes[2] = {}; + poolSizes[0].type = bindings[0].descriptorType; + poolSizes[0].descriptorCount = bindings[0].descriptorCount; + poolSizes[1].type = bindings[1].descriptorType; + poolSizes[1].descriptorCount = bindings[1].descriptorCount; + + VkDescriptorPoolCreateInfo descPoolCreateInfo = { VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO }; + descPoolCreateInfo.maxSets = 1; + descPoolCreateInfo.poolSizeCount = 2; + descPoolCreateInfo.pPoolSizes = poolSizes; + + VkDescriptorPool descPool = nullptr; + TEST( vkCreateDescriptorPool(g_hDevice, &descPoolCreateInfo, nullptr, &descPool) == VK_SUCCESS ); + + VkDescriptorSetAllocateInfo descSetAllocInfo = 
{ VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO }; + descSetAllocInfo.descriptorPool = descPool; + descSetAllocInfo.descriptorSetCount = 1; + descSetAllocInfo.pSetLayouts = &descSetLayout; + + VkDescriptorSet descSet = nullptr; + TEST( vkAllocateDescriptorSets(g_hDevice, &descSetAllocInfo, &descSet) == VK_SUCCESS ); + + VkImageViewCreateInfo imageViewCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO }; + imageViewCreateInfo.image = m_Image; + imageViewCreateInfo.viewType = VK_IMAGE_VIEW_TYPE_2D; + imageViewCreateInfo.format = m_CreateInfo.format; + imageViewCreateInfo.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; + imageViewCreateInfo.subresourceRange.layerCount = 1; + imageViewCreateInfo.subresourceRange.levelCount = 1; + + VkImageView imageView = nullptr; + TEST( vkCreateImageView(g_hDevice, &imageViewCreateInfo, nullptr, &imageView) == VK_SUCCESS ); + + VkDescriptorImageInfo descImageInfo = {}; + descImageInfo.imageView = imageView; + descImageInfo.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; + + VkDescriptorBufferInfo descBufferInfo = {}; + descBufferInfo.buffer = dstBuf; + descBufferInfo.offset = 0; + descBufferInfo.range = VK_WHOLE_SIZE; + + VkWriteDescriptorSet descWrites[2] = {}; + descWrites[0].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; + descWrites[0].dstSet = descSet; + descWrites[0].dstBinding = bindings[0].binding; + descWrites[0].dstArrayElement = 0; + descWrites[0].descriptorCount = 1; + descWrites[0].descriptorType = bindings[0].descriptorType; + descWrites[0].pImageInfo = &descImageInfo; + descWrites[1].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; + descWrites[1].dstSet = descSet; + descWrites[1].dstBinding = bindings[1].binding; + descWrites[1].dstArrayElement = 0; + descWrites[1].descriptorCount = 1; + descWrites[1].descriptorType = bindings[1].descriptorType; + descWrites[1].pBufferInfo = &descBufferInfo; + vkUpdateDescriptorSets(g_hDevice, 2, descWrites, 0, nullptr); + + BeginSingleTimeCommands(); + 
vkCmdBindPipeline(g_hTemporaryCommandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipeline); + vkCmdBindDescriptorSets(g_hTemporaryCommandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipelineLayout, 0, 1, &descSet, 0, nullptr); + vkCmdDispatch(g_hTemporaryCommandBuffer, valueCount, 1, 1); + EndSingleTimeCommands(); + + // Validate dstBuf output data. + { + const uint32_t* dstBufContent = (const uint32_t*)dstBufAllocInfo.pMappedData; + for(uint32_t i = 0; i < valueCount; ++i) + { + const uint32_t x = dstBufContent[i * 3 ]; + const uint32_t y = dstBufContent[i * 3 + 1]; + const uint32_t color = dstBufContent[i * 3 + 2]; + const uint8_t a = (uint8_t)(color >> 24); + const uint8_t b = (uint8_t)(color >> 16); + const uint8_t g = (uint8_t)(color >> 8); + const uint8_t r = (uint8_t)color; + TEST(r == (uint8_t)x && g == (uint8_t)y && b == 13 && a == 25); + } + } + + vkDestroyImageView(g_hDevice, imageView, nullptr); + vkDestroyDescriptorPool(g_hDevice, descPool, nullptr); + vmaDestroyBuffer(g_hAllocator, dstBuf, dstBufAlloc); + vkDestroyPipeline(g_hDevice, pipeline, nullptr); + vkDestroyShaderModule(g_hDevice, shaderModule, nullptr); + vkDestroyPipelineLayout(g_hDevice, pipelineLayout, nullptr); + vkDestroyDescriptorSetLayout(g_hDevice, descSetLayout, nullptr); + vkDestroySampler(g_hDevice, sampler, nullptr); } //////////////////////////////////////////////////////////////////////////////// @@ -87,15 +397,14 @@ void BaseImage::FillImageCreateInfo(VkImageCreateInfo& outInfo, RandomNumberGene void TraditionalImage::Init(RandomNumberGenerator& rand) { - VkImageCreateInfo imageCreateInfo; - FillImageCreateInfo(imageCreateInfo, rand); + FillImageCreateInfo(rand); VmaAllocationCreateInfo allocCreateInfo = {}; allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY; // Default BEST_FIT is clearly better. 
//allocCreateInfo.flags |= VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT; - ERR_GUARD_VULKAN( vmaCreateImage(g_hAllocator, &imageCreateInfo, &allocCreateInfo, + ERR_GUARD_VULKAN( vmaCreateImage(g_hAllocator, &m_CreateInfo, &allocCreateInfo, &m_Image, &m_Allocation, nullptr) ); } @@ -115,10 +424,9 @@ void SparseBindingImage::Init(RandomNumberGenerator& rand) assert(g_SparseBindingEnabled && g_hSparseBindingQueue); // Create image. - VkImageCreateInfo imageCreateInfo; - FillImageCreateInfo(imageCreateInfo, rand); - imageCreateInfo.flags |= VK_IMAGE_CREATE_SPARSE_BINDING_BIT; - ERR_GUARD_VULKAN( vkCreateImage(g_hDevice, &imageCreateInfo, nullptr, &m_Image) ); + FillImageCreateInfo(rand); + m_CreateInfo.flags |= VK_IMAGE_CREATE_SPARSE_BINDING_BIT; + ERR_GUARD_VULKAN( vkCreateImage(g_hDevice, &m_CreateInfo, nullptr, &m_Image) ); // Get memory requirements. VkMemoryRequirements imageMemReq; @@ -126,6 +434,7 @@ void SparseBindingImage::Init(RandomNumberGenerator& rand) // This is just to silence validation layer warning. // But it doesn't help. Looks like a bug in Vulkan validation layers. + // See: https://github.com/KhronosGroup/Vulkan-ValidationLayers/issues/364 uint32_t sparseMemReqCount = 0; vkGetImageSparseMemoryRequirements(g_hDevice, m_Image, &sparseMemReqCount, nullptr); TEST(sparseMemReqCount <= 8); @@ -224,6 +533,21 @@ void TestSparseBinding() SaveAllocatorStatsToFile(L"SparseBindingTest.json"); + // Choose biggest image. Test uploading and sampling. + BaseImage* biggestImage = nullptr; + for(size_t i = 0, count = images.size(); i < count; ++i) + { + if(!biggestImage || + images[i].image->GetCreateInfo().extent.width * images[i].image->GetCreateInfo().extent.height > + biggestImage->GetCreateInfo().extent.width * biggestImage->GetCreateInfo().extent.height) + { + biggestImage = images[i].image.get(); + } + } + assert(biggestImage); + + biggestImage->TestContent(rand); + // Free remaining images. 
images.clear(); } diff --git a/src/VulkanSample.cpp b/src/VulkanSample.cpp index 0e5cae6..ed79925 100644 --- a/src/VulkanSample.cpp +++ b/src/VulkanSample.cpp @@ -149,7 +149,7 @@ void EndSingleTimeCommands() ERR_GUARD_VULKAN( vkQueueWaitIdle(g_hGraphicsQueue) ); } -static void LoadShader(std::vector& out, const char* fileName) +void LoadShader(std::vector& out, const char* fileName) { std::ifstream file(std::string(SHADER_PATH1) + fileName, std::ios::ate | std::ios::binary); if(file.is_open() == false) @@ -1222,8 +1222,9 @@ static void InitializeApplication() { if(queueFamilies[i].queueCount > 0) { + const uint32_t flagsForGraphicsQueue = VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT; if((g_GraphicsQueueFamilyIndex != 0) && - ((queueFamilies[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) != 0)) + ((queueFamilies[i].queueFlags & flagsForGraphicsQueue) == flagsForGraphicsQueue)) { g_GraphicsQueueFamilyIndex = i; } @@ -1785,6 +1786,23 @@ static LRESULT WINAPI WndProc(HWND hWnd, UINT msg, WPARAM wParam, LPARAM lParam) printf("ERROR: %s\n", ex.what()); } break; + case 'S': + try + { + if(g_SparseBindingEnabled) + { + TestSparseBinding(); + } + else + { + printf("Sparse binding not supported.\n"); + } + } + catch(const std::exception& ex) + { + printf("ERROR: %s\n", ex.what()); + } + break; } return 0; From 4a2be4ee40f0e48b07f52cdd42b5b6e49669108d Mon Sep 17 00:00:00 2001 From: Adam Sawicki Date: Thu, 6 Dec 2018 12:44:49 +0100 Subject: [PATCH 08/10] Minor tweak in sparse binding tests. 
--- src/SparseBindingTest.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/SparseBindingTest.cpp b/src/SparseBindingTest.cpp index b1fc0cb..c953a05 100644 --- a/src/SparseBindingTest.cpp +++ b/src/SparseBindingTest.cpp @@ -216,7 +216,7 @@ void BaseImage::ValidateContent(RandomNumberGenerator& rand) } */ - const uint32_t valueCount = 32; + const uint32_t valueCount = 128; VkBufferCreateInfo dstBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; dstBufCreateInfo.usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT; @@ -501,7 +501,7 @@ void TestSparseBinding() }; std::vector images; - constexpr uint32_t frameCount = 2000; + constexpr uint32_t frameCount = 1000; constexpr uint32_t imageLifeFramesMin = 1; constexpr uint32_t imageLifeFramesMax = 400; From 1ae513ae5cdde3f044680bef8d32058c65622c42 Mon Sep 17 00:00:00 2001 From: Adam Sawicki Date: Thu, 6 Dec 2018 12:49:52 +0100 Subject: [PATCH 09/10] Sparse binding test: added textures with mip maps. --- src/SparseBindingTest.cpp | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/src/SparseBindingTest.cpp b/src/SparseBindingTest.cpp index c953a05..3722523 100644 --- a/src/SparseBindingTest.cpp +++ b/src/SparseBindingTest.cpp @@ -22,6 +22,19 @@ void LoadShader(std::vector& out, const char* fileName); //////////////////////////////////////////////////////////////////////////////// // Class definitions +static uint32_t CalculateMipMapCount(uint32_t width, uint32_t height, uint32_t depth) +{ + uint32_t mipMapCount = 1; + while(width > 1 || height > 1 || depth > 1) + { + ++mipMapCount; + width /= 2; + height /= 2; + depth /= 2; + } + return mipMapCount; +} + class BaseImage { public: @@ -85,13 +98,16 @@ void BaseImage::FillImageCreateInfo(RandomNumberGenerator& rand) constexpr uint32_t imageSizeMin = 8; constexpr uint32_t imageSizeMax = 2048; + const bool useMipMaps = rand.Generate() % 2 != 0; + ZeroMemory(&m_CreateInfo, sizeof(m_CreateInfo)); m_CreateInfo.sType = 
VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; m_CreateInfo.imageType = VK_IMAGE_TYPE_2D; m_CreateInfo.extent.width = rand.Generate() % (imageSizeMax - imageSizeMin) + imageSizeMin; m_CreateInfo.extent.height = rand.Generate() % (imageSizeMax - imageSizeMin) + imageSizeMin; m_CreateInfo.extent.depth = 1; - m_CreateInfo.mipLevels = 1; // TODO ? + m_CreateInfo.mipLevels = useMipMaps ? + CalculateMipMapCount(m_CreateInfo.extent.width, m_CreateInfo.extent.height, m_CreateInfo.extent.depth) : 1; m_CreateInfo.arrayLayers = 1; m_CreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM; m_CreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL; From 2e900cae5467dd1a82a67019d78201e79b9c8bd6 Mon Sep 17 00:00:00 2001 From: Adam Sawicki Date: Thu, 6 Dec 2018 14:26:50 +0100 Subject: [PATCH 10/10] Added functions vmaAllocateMemoryPages, vmaFreeMemoryPages to VmaRecorder and VmaReplay. Bumped recording file format version to 1.5. Support for sparse binding is now finished and ready! --- docs/Recording file format.md | 26 ++++- src/VmaReplay/Common.cpp | 24 +++++ src/VmaReplay/Common.h | 1 + src/VmaReplay/VmaReplay.cpp | 187 +++++++++++++++++++++++++++------- src/vk_mem_alloc.h | 75 ++++++++++++-- 5 files changed, 268 insertions(+), 45 deletions(-) diff --git a/docs/Recording file format.md b/docs/Recording file format.md index 875c638..b56413a 100644 --- a/docs/Recording file format.md +++ b/docs/Recording file format.md @@ -23,7 +23,7 @@ Formats with only minor version incremented are backward compatible. VmaReplay application supports all older versions. Current version is: - 1,4 + 1,5 # Configuration @@ -152,6 +152,10 @@ No parameters. - allocation : pointer +**vmaFreeMemoryPages** (min format version: 1.5) + +- allocations : list of pointers + **vmaCreateLostAllocation** (min format version 1.2) - allocation (output) : pointer @@ -170,6 +174,20 @@ No parameters. 
- allocation (output) : pointer - allocationCreateInfo.pUserData : string (may contain additional commas) +**vmaAllocateMemoryPages** (min format version 1.5) + +- vkMemoryRequirements.size : uint64 +- vkMemoryRequirements.alignment : uint64 +- vkMemoryRequirements.memoryTypeBits : uint32 +- allocationCreateInfo.flags : uint32 +- allocationCreateInfo.usage : uint32 +- allocationCreateInfo.requiredFlags : uint32 +- allocationCreateInfo.preferredFlags : uint32 +- allocationCreateInfo.memoryTypeBits : uint32 +- allocationCreateInfo.pool : pointer +- allocations (output) : list of pointers +- allocationCreateInfo.pUserData : string (may contain additional commas) + **vmaAllocateMemoryForBuffer, vmaAllocateMemoryForImage** (min format version 1.2) - vkMemoryRequirements.size : uint64 @@ -230,10 +248,14 @@ If `VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT` was used with the allocatio It may contain additional commas. It should not contain end-of-line characters - results are then undefined. +**list of (...)** (min format version: 1.5) + +An ordered sequence of values of some type, separated by single space. 
+ # Example file Vulkan Memory Allocator,Calls recording - 1,4 + 1,5 Config,Begin PhysicalDevice,apiVersion,4198477 PhysicalDevice,driverVersion,8388653 diff --git a/src/VmaReplay/Common.cpp b/src/VmaReplay/Common.cpp index 515c1ce..a7c723f 100644 --- a/src/VmaReplay/Common.cpp +++ b/src/VmaReplay/Common.cpp @@ -1,5 +1,29 @@ #include "Common.h" +bool StrRangeToPtrList(const StrRange& s, std::vector& out) +{ + out.clear(); + StrRange currRange = { s.beg, nullptr }; + while(currRange.beg < s.end) + { + currRange.end = currRange.beg; + while(currRange.end < s.end && *currRange.end != ' ') + { + ++currRange.end; + } + + uint64_t ptr = 0; + if(!StrRangeToPtr(currRange, ptr)) + { + return false; + } + out.push_back(ptr); + + currRange.beg = currRange.end + 1; + } + return true; +} + //////////////////////////////////////////////////////////////////////////////// // LineSplit class diff --git a/src/VmaReplay/Common.h b/src/VmaReplay/Common.h index 28b5bbc..a78524b 100644 --- a/src/VmaReplay/Common.h +++ b/src/VmaReplay/Common.h @@ -111,6 +111,7 @@ inline bool StrRangeToBool(const StrRange& s, bool& out) return true; } +bool StrRangeToPtrList(const StrRange& s, std::vector& out); class LineSplit { diff --git a/src/VmaReplay/VmaReplay.cpp b/src/VmaReplay/VmaReplay.cpp index a9acfa5..79d9bde 100644 --- a/src/VmaReplay/VmaReplay.cpp +++ b/src/VmaReplay/VmaReplay.cpp @@ -71,8 +71,10 @@ enum class VMA_FUNCTION CreateImage, DestroyImage, FreeMemory, + FreeMemoryPages, CreateLostAllocation, AllocateMemory, + AllocateMemoryPages, AllocateMemoryForBuffer, AllocateMemoryForImage, MapMemory, @@ -94,8 +96,10 @@ static const char* VMA_FUNCTION_NAMES[] = { "vmaCreateImage", "vmaDestroyImage", "vmaFreeMemory", + "vmaFreeMemoryPages", "vmaCreateLostAllocation", "vmaAllocateMemory", + "vmaAllocateMemoryPages", "vmaAllocateMemoryForBuffer", "vmaAllocateMemoryForImage", "vmaMapMemory", @@ -145,7 +149,7 @@ static size_t g_DumpStatsAfterLineNextIndex = 0; static bool ValidateFileVersion() { 
if(GetVersionMajor(g_FileVersion) == 1 && - GetVersionMinor(g_FileVersion) <= 4) + GetVersionMinor(g_FileVersion) <= 5) { return true; } @@ -195,7 +199,7 @@ public: void RegisterCreateImage(uint32_t usage, uint32_t tiling); void RegisterCreateBuffer(uint32_t usage); void RegisterCreatePool(); - void RegisterCreateAllocation(); + void RegisterCreateAllocation(size_t allocCount = 1); void UpdateMemStats(const VmaStats& currStats); @@ -364,9 +368,9 @@ void Statistics::RegisterCreatePool() ++m_PoolCreationCount; } -void Statistics::RegisterCreateAllocation() +void Statistics::RegisterCreateAllocation(size_t allocCount) { - ++m_AllocationCreationCount; + m_AllocationCreationCount += allocCount; } void Statistics::UpdateMemStats(const VmaStats& currStats) @@ -955,10 +959,10 @@ private: }; struct Allocation { - uint32_t allocationFlags; - VmaAllocation allocation; - VkBuffer buffer; - VkImage image; + uint32_t allocationFlags = 0; + VmaAllocation allocation = VK_NULL_HANDLE; + VkBuffer buffer = VK_NULL_HANDLE; + VkImage image = VK_NULL_HANDLE; }; std::unordered_map m_Pools; std::unordered_map m_Allocations; @@ -1003,12 +1007,14 @@ private: void ExecuteDestroyPool(size_t lineNumber, const CsvSplit& csvSplit); void ExecuteSetAllocationUserData(size_t lineNumber, const CsvSplit& csvSplit); void ExecuteCreateBuffer(size_t lineNumber, const CsvSplit& csvSplit); - void ExecuteDestroyBuffer(size_t lineNumber, const CsvSplit& csvSplit) { m_Stats.RegisterFunctionCall(VMA_FUNCTION::DestroyBuffer); DestroyAllocation(lineNumber, csvSplit); } + void ExecuteDestroyBuffer(size_t lineNumber, const CsvSplit& csvSplit) { m_Stats.RegisterFunctionCall(VMA_FUNCTION::DestroyBuffer); DestroyAllocation(lineNumber, csvSplit, "vmaDestroyBuffer"); } void ExecuteCreateImage(size_t lineNumber, const CsvSplit& csvSplit); - void ExecuteDestroyImage(size_t lineNumber, const CsvSplit& csvSplit) { m_Stats.RegisterFunctionCall(VMA_FUNCTION::DestroyImage); DestroyAllocation(lineNumber, csvSplit); } - void 
ExecuteFreeMemory(size_t lineNumber, const CsvSplit& csvSplit) { m_Stats.RegisterFunctionCall(VMA_FUNCTION::FreeMemory); DestroyAllocation(lineNumber, csvSplit); } + void ExecuteDestroyImage(size_t lineNumber, const CsvSplit& csvSplit) { m_Stats.RegisterFunctionCall(VMA_FUNCTION::DestroyImage); DestroyAllocation(lineNumber, csvSplit, "vmaDestroyImage"); } + void ExecuteFreeMemory(size_t lineNumber, const CsvSplit& csvSplit) { m_Stats.RegisterFunctionCall(VMA_FUNCTION::FreeMemory); DestroyAllocation(lineNumber, csvSplit, "vmaFreeMemory"); } + void ExecuteFreeMemoryPages(size_t lineNumber, const CsvSplit& csvSplit); void ExecuteCreateLostAllocation(size_t lineNumber, const CsvSplit& csvSplit); void ExecuteAllocateMemory(size_t lineNumber, const CsvSplit& csvSplit); + void ExecuteAllocateMemoryPages(size_t lineNumber, const CsvSplit& csvSplit); void ExecuteAllocateMemoryForBufferOrImage(size_t lineNumber, const CsvSplit& csvSplit, OBJECT_TYPE objType); void ExecuteMapMemory(size_t lineNumber, const CsvSplit& csvSplit); void ExecuteUnmapMemory(size_t lineNumber, const CsvSplit& csvSplit); @@ -1019,7 +1025,7 @@ private: void ExecuteMakePoolAllocationsLost(size_t lineNumber, const CsvSplit& csvSplit); void ExecuteResizeAllocation(size_t lineNumber, const CsvSplit& csvSplit); - void DestroyAllocation(size_t lineNumber, const CsvSplit& csvSplit); + void DestroyAllocation(size_t lineNumber, const CsvSplit& csvSplit, const char* functionName); }; Player::Player() @@ -1121,45 +1127,49 @@ void Player::ExecuteLine(size_t lineNumber, const StrRange& line) // Nothing. 
} } - else if(StrRangeEq(functionName, "vmaCreatePool")) + else if(StrRangeEq(functionName, VMA_FUNCTION_NAMES[(uint32_t)VMA_FUNCTION::CreatePool])) ExecuteCreatePool(lineNumber, csvSplit); - else if(StrRangeEq(functionName, "vmaDestroyPool")) + else if(StrRangeEq(functionName, VMA_FUNCTION_NAMES[(uint32_t)VMA_FUNCTION::DestroyPool])) ExecuteDestroyPool(lineNumber, csvSplit); - else if(StrRangeEq(functionName, "vmaSetAllocationUserData")) + else if(StrRangeEq(functionName, VMA_FUNCTION_NAMES[(uint32_t)VMA_FUNCTION::SetAllocationUserData])) ExecuteSetAllocationUserData(lineNumber, csvSplit); - else if(StrRangeEq(functionName, "vmaCreateBuffer")) + else if(StrRangeEq(functionName, VMA_FUNCTION_NAMES[(uint32_t)VMA_FUNCTION::CreateBuffer])) ExecuteCreateBuffer(lineNumber, csvSplit); - else if(StrRangeEq(functionName, "vmaDestroyBuffer")) + else if(StrRangeEq(functionName, VMA_FUNCTION_NAMES[(uint32_t)VMA_FUNCTION::DestroyBuffer])) ExecuteDestroyBuffer(lineNumber, csvSplit); - else if(StrRangeEq(functionName, "vmaCreateImage")) + else if(StrRangeEq(functionName, VMA_FUNCTION_NAMES[(uint32_t)VMA_FUNCTION::CreateImage])) ExecuteCreateImage(lineNumber, csvSplit); - else if(StrRangeEq(functionName, "vmaDestroyImage")) + else if(StrRangeEq(functionName, VMA_FUNCTION_NAMES[(uint32_t)VMA_FUNCTION::DestroyImage])) ExecuteDestroyImage(lineNumber, csvSplit); - else if(StrRangeEq(functionName, "vmaFreeMemory")) + else if(StrRangeEq(functionName, VMA_FUNCTION_NAMES[(uint32_t)VMA_FUNCTION::FreeMemory])) ExecuteFreeMemory(lineNumber, csvSplit); - else if(StrRangeEq(functionName, "vmaCreateLostAllocation")) + else if(StrRangeEq(functionName, VMA_FUNCTION_NAMES[(uint32_t)VMA_FUNCTION::FreeMemoryPages])) + ExecuteFreeMemoryPages(lineNumber, csvSplit); + else if(StrRangeEq(functionName, VMA_FUNCTION_NAMES[(uint32_t)VMA_FUNCTION::CreateLostAllocation])) ExecuteCreateLostAllocation(lineNumber, csvSplit); - else if(StrRangeEq(functionName, "vmaAllocateMemory")) + else 
if(StrRangeEq(functionName, VMA_FUNCTION_NAMES[(uint32_t)VMA_FUNCTION::AllocateMemory])) ExecuteAllocateMemory(lineNumber, csvSplit); - else if(StrRangeEq(functionName, "vmaAllocateMemoryForBuffer")) + else if(StrRangeEq(functionName, VMA_FUNCTION_NAMES[(uint32_t)VMA_FUNCTION::AllocateMemoryPages])) + ExecuteAllocateMemoryPages(lineNumber, csvSplit); + else if(StrRangeEq(functionName, VMA_FUNCTION_NAMES[(uint32_t)VMA_FUNCTION::AllocateMemoryForBuffer])) ExecuteAllocateMemoryForBufferOrImage(lineNumber, csvSplit, OBJECT_TYPE::BUFFER); - else if(StrRangeEq(functionName, "vmaAllocateMemoryForImage")) + else if(StrRangeEq(functionName, VMA_FUNCTION_NAMES[(uint32_t)VMA_FUNCTION::AllocateMemoryForImage])) ExecuteAllocateMemoryForBufferOrImage(lineNumber, csvSplit, OBJECT_TYPE::IMAGE); - else if(StrRangeEq(functionName, "vmaMapMemory")) + else if(StrRangeEq(functionName, VMA_FUNCTION_NAMES[(uint32_t)VMA_FUNCTION::MapMemory])) ExecuteMapMemory(lineNumber, csvSplit); - else if(StrRangeEq(functionName, "vmaUnmapMemory")) + else if(StrRangeEq(functionName, VMA_FUNCTION_NAMES[(uint32_t)VMA_FUNCTION::UnmapMemory])) ExecuteUnmapMemory(lineNumber, csvSplit); - else if(StrRangeEq(functionName, "vmaFlushAllocation")) + else if(StrRangeEq(functionName, VMA_FUNCTION_NAMES[(uint32_t)VMA_FUNCTION::FlushAllocation])) ExecuteFlushAllocation(lineNumber, csvSplit); - else if(StrRangeEq(functionName, "vmaInvalidateAllocation")) + else if(StrRangeEq(functionName, VMA_FUNCTION_NAMES[(uint32_t)VMA_FUNCTION::InvalidateAllocation])) ExecuteInvalidateAllocation(lineNumber, csvSplit); - else if(StrRangeEq(functionName, "vmaTouchAllocation")) + else if(StrRangeEq(functionName, VMA_FUNCTION_NAMES[(uint32_t)VMA_FUNCTION::TouchAllocation])) ExecuteTouchAllocation(lineNumber, csvSplit); - else if(StrRangeEq(functionName, "vmaGetAllocationInfo")) + else if(StrRangeEq(functionName, VMA_FUNCTION_NAMES[(uint32_t)VMA_FUNCTION::GetAllocationInfo])) ExecuteGetAllocationInfo(lineNumber, csvSplit); - else 
if(StrRangeEq(functionName, "vmaMakePoolAllocationsLost")) + else if(StrRangeEq(functionName, VMA_FUNCTION_NAMES[(uint32_t)VMA_FUNCTION::MakePoolAllocationsLost])) ExecuteMakePoolAllocationsLost(lineNumber, csvSplit); - else if(StrRangeEq(functionName, "vmaResizeAllocation")) + else if(StrRangeEq(functionName, VMA_FUNCTION_NAMES[(uint32_t)VMA_FUNCTION::ResizeAllocation])) ExecuteResizeAllocation(lineNumber, csvSplit); else { @@ -2029,7 +2039,7 @@ void Player::ExecuteCreateBuffer(size_t lineNumber, const CsvSplit& csvSplit) } } -void Player::DestroyAllocation(size_t lineNumber, const CsvSplit& csvSplit) +void Player::DestroyAllocation(size_t lineNumber, const CsvSplit& csvSplit, const char* functionName) { if(ValidateFunctionParameterCount(lineNumber, csvSplit, 1, false)) { @@ -2059,7 +2069,7 @@ void Player::DestroyAllocation(size_t lineNumber, const CsvSplit& csvSplit) { if(IssueWarning()) { - printf("Line %zu: Invalid parameters for vmaDestroyBuffer.\n", lineNumber); + printf("Line %zu: Invalid parameters for %s.\n", lineNumber, functionName); } } } @@ -2127,6 +2137,53 @@ void Player::ExecuteCreateImage(size_t lineNumber, const CsvSplit& csvSplit) } } +void Player::ExecuteFreeMemoryPages(size_t lineNumber, const CsvSplit& csvSplit) +{ + m_Stats.RegisterFunctionCall(VMA_FUNCTION::FreeMemoryPages); + + if(ValidateFunctionParameterCount(lineNumber, csvSplit, 1, false)) + { + std::vector origAllocPtrs; + if(StrRangeToPtrList(csvSplit.GetRange(FIRST_PARAM_INDEX), origAllocPtrs)) + { + const size_t allocCount = origAllocPtrs.size(); + size_t notNullCount = 0; + for(size_t i = 0; i < allocCount; ++i) + { + const uint64_t origAllocPtr = origAllocPtrs[i]; + if(origAllocPtr != 0) + { + const auto it = m_Allocations.find(origAllocPtr); + if(it != m_Allocations.end()) + { + Destroy(it->second); + m_Allocations.erase(it); + ++notNullCount; + } + else + { + if(IssueWarning()) + { + printf("Line %zu: Allocation %llX not found.\n", lineNumber, origAllocPtr); + } + } + } + } + 
if(notNullCount) + { + UpdateMemStats(); + } + } + else + { + if(IssueWarning()) + { + printf("Line %zu: Invalid parameters for vmaFreeMemoryPages.\n", lineNumber); + } + } + } +} + void Player::ExecuteCreateLostAllocation(size_t lineNumber, const CsvSplit& csvSplit) { m_Stats.RegisterFunctionCall(VMA_FUNCTION::CreateLostAllocation); @@ -2206,6 +2263,68 @@ void Player::ExecuteAllocateMemory(size_t lineNumber, const CsvSplit& csvSplit) } } +void Player::ExecuteAllocateMemoryPages(size_t lineNumber, const CsvSplit& csvSplit) +{ + m_Stats.RegisterFunctionCall(VMA_FUNCTION::AllocateMemoryPages); + + if(ValidateFunctionParameterCount(lineNumber, csvSplit, 11, true)) + { + VkMemoryRequirements memReq = {}; + VmaAllocationCreateInfo allocCreateInfo = {}; + uint64_t origPool = 0; + std::vector origPtrs; + + if(StrRangeToUint(csvSplit.GetRange(FIRST_PARAM_INDEX), memReq.size) && + StrRangeToUint(csvSplit.GetRange(FIRST_PARAM_INDEX + 1), memReq.alignment) && + StrRangeToUint(csvSplit.GetRange(FIRST_PARAM_INDEX + 2), memReq.memoryTypeBits) && + StrRangeToUint(csvSplit.GetRange(FIRST_PARAM_INDEX + 3), allocCreateInfo.flags) && + StrRangeToUint(csvSplit.GetRange(FIRST_PARAM_INDEX + 4), (uint32_t&)allocCreateInfo.usage) && + StrRangeToUint(csvSplit.GetRange(FIRST_PARAM_INDEX + 5), allocCreateInfo.requiredFlags) && + StrRangeToUint(csvSplit.GetRange(FIRST_PARAM_INDEX + 6), allocCreateInfo.preferredFlags) && + StrRangeToUint(csvSplit.GetRange(FIRST_PARAM_INDEX + 7), allocCreateInfo.memoryTypeBits) && + StrRangeToPtr(csvSplit.GetRange(FIRST_PARAM_INDEX + 8), origPool) && + StrRangeToPtrList(csvSplit.GetRange(FIRST_PARAM_INDEX + 9), origPtrs)) + { + const size_t allocCount = origPtrs.size(); + if(allocCount > 0) + { + FindPool(lineNumber, origPool, allocCreateInfo.pool); + + if(csvSplit.GetCount() > FIRST_PARAM_INDEX + 10) + { + PrepareUserData( + lineNumber, + allocCreateInfo.flags, + csvSplit.GetRange(FIRST_PARAM_INDEX + 10), + csvSplit.GetLine(), + allocCreateInfo.pUserData); + } 
+ + UpdateMemStats(); + m_Stats.RegisterCreateAllocation(allocCount); + + std::vector allocations(allocCount); + + VkResult res = vmaAllocateMemoryPages(m_Allocator, &memReq, &allocCreateInfo, allocCount, allocations.data(), nullptr); + for(size_t i = 0; i < allocCount; ++i) + { + Allocation allocDesc = {}; + allocDesc.allocationFlags = allocCreateInfo.flags; + allocDesc.allocation = allocations[i]; + AddAllocation(lineNumber, origPtrs[i], res, "vmaAllocateMemoryPages", std::move(allocDesc)); + } + } + } + else + { + if(IssueWarning()) + { + printf("Line %zu: Invalid parameters for vmaAllocateMemoryPages.\n", lineNumber); + } + } + } +} + void Player::ExecuteAllocateMemoryForBufferOrImage(size_t lineNumber, const CsvSplit& csvSplit, OBJECT_TYPE objType) { switch(objType) diff --git a/src/vk_mem_alloc.h b/src/vk_mem_alloc.h index eebeb0f..36dcae3 100644 --- a/src/vk_mem_alloc.h +++ b/src/vk_mem_alloc.h @@ -5676,6 +5676,11 @@ public: const VkMemoryRequirements& vkMemReq, const VmaAllocationCreateInfo& createInfo, VmaAllocation allocation); + void RecordAllocateMemoryPages(uint32_t frameIndex, + const VkMemoryRequirements& vkMemReq, + const VmaAllocationCreateInfo& createInfo, + uint64_t allocationCount, + const VmaAllocation* pAllocations); void RecordAllocateMemoryForBuffer(uint32_t frameIndex, const VkMemoryRequirements& vkMemReq, bool requiresDedicatedAllocation, @@ -5690,6 +5695,9 @@ public: VmaAllocation allocation); void RecordFreeMemory(uint32_t frameIndex, VmaAllocation allocation); + void RecordFreeMemoryPages(uint32_t frameIndex, + uint64_t allocationCount, + const VmaAllocation* pAllocations); void RecordResizeAllocation( uint32_t frameIndex, VmaAllocation allocation, @@ -5752,6 +5760,7 @@ private: int64_t m_StartCounter; void GetBasicParams(CallParams& outParams); + void PrintPointerList(uint64_t count, const VmaAllocation* pItems); void Flush(); }; @@ -11661,7 +11670,7 @@ VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex) // 
Write header. fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording"); - fprintf(m_File, "%s\n", "1,4"); + fprintf(m_File, "%s\n", "1,5"); return VK_SUCCESS; } @@ -11747,6 +11756,32 @@ void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex, Flush(); } +void VmaRecorder::RecordAllocateMemoryPages(uint32_t frameIndex, + const VkMemoryRequirements& vkMemReq, + const VmaAllocationCreateInfo& createInfo, + uint64_t allocationCount, + const VmaAllocation* pAllocations) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + UserDataString userDataStr(createInfo.flags, createInfo.pUserData); + fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryPages,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,", callParams.threadId, callParams.time, frameIndex, + vkMemReq.size, + vkMemReq.alignment, + vkMemReq.memoryTypeBits, + createInfo.flags, + createInfo.usage, + createInfo.requiredFlags, + createInfo.preferredFlags, + createInfo.memoryTypeBits, + createInfo.pool); + PrintPointerList(allocationCount, pAllocations); + fprintf(m_File, ",%s\n", userDataStr.GetString()); + Flush(); +} + void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex, const VkMemoryRequirements& vkMemReq, bool requiresDedicatedAllocation, @@ -11817,6 +11852,20 @@ void VmaRecorder::RecordFreeMemory(uint32_t frameIndex, Flush(); } +void VmaRecorder::RecordFreeMemoryPages(uint32_t frameIndex, + uint64_t allocationCount, + const VmaAllocation* pAllocations) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + fprintf(m_File, "%u,%.3f,%u,vmaFreeMemoryPages,", callParams.threadId, callParams.time, frameIndex); + PrintPointerList(allocationCount, pAllocations); + fprintf(m_File, "\n"); + Flush(); +} + void VmaRecorder::RecordResizeAllocation( uint32_t frameIndex, VmaAllocation allocation, @@ -12108,6 +12157,18 @@ void VmaRecorder::GetBasicParams(CallParams& outParams) outParams.time = 
(double)(counter.QuadPart - m_StartCounter) / (double)m_Freq; } +void VmaRecorder::PrintPointerList(uint64_t count, const VmaAllocation* pItems) +{ + if(count) + { + fprintf(m_File, "%p", pItems[0]); + for(uint64_t i = 1; i < count; ++i) + { + fprintf(m_File, " %p", pItems[i]); + } + } +} + void VmaRecorder::Flush() { if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0) @@ -14220,14 +14281,12 @@ VkResult vmaAllocateMemoryPages( #if VMA_RECORDING_ENABLED if(allocator->GetRecorder() != VMA_NULL) { - // TODO: Extend recording format with this function. - /* allocator->GetRecorder()->RecordAllocateMemoryPages( allocator->GetCurrentFrameIndex(), *pVkMemoryRequirements, *pCreateInfo, - *pAllocation); - */ + (uint64_t)allocationCount, + pAllocations); } #endif @@ -14391,15 +14450,13 @@ void vmaFreeMemoryPages( VMA_DEBUG_GLOBAL_MUTEX_LOCK #if VMA_RECORDING_ENABLED - // TODO Add this to recording file format. - /* if(allocator->GetRecorder() != VMA_NULL) { allocator->GetRecorder()->RecordFreeMemoryPages( allocator->GetCurrentFrameIndex(), - allocation); + (uint64_t)allocationCount, + pAllocations); } - */ #endif allocator->FreeMemory(allocationCount, pAllocations);