diff --git a/docs/html/defragmentation.html b/docs/html/defragmentation.html index 090a6c3..c180c52 100644 --- a/docs/html/defragmentation.html +++ b/docs/html/defragmentation.html @@ -78,7 +78,7 @@ $(function() {

What it doesn't do, so you need to do it yourself:

@@ -89,7 +89,7 @@ Defragmenting CPU memory

  • It temporarily maps entire memory blocks when necessary.
  • It moves data using memmove() function.
  • -
    // Given following variables already initialized:
    VkDevice device;
    VmaAllocator allocator;
    std::vector<VkBuffer> buffers;
    std::vector<VmaAllocation> allocations;
    const uint32_t allocCount = (uint32_t)allocations.size();
    std::vector<VkBool32> allocationsChanged(allocCount);
    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = allocCount;
    defragInfo.pAllocations = allocations.data();
    defragInfo.pAllocationsChanged = allocationsChanged.data();
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE; // No limit.
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX; // No limit.
    VmaDefragmentationContext defragCtx;
    vmaDefragmentationBegin(allocator, &defragInfo, nullptr, &defragCtx);
    vmaDefragmentationEnd(allocator, defragCtx);
    for(uint32_t i = 0; i < allocCount; ++i)
    {
    if(allocationsChanged[i])
    {
    // Destroy buffer that is immutably bound to memory region which is no longer valid.
    vkDestroyBuffer(device, buffers[i], nullptr);
    // Create new buffer with same parameters.
    VkBufferCreateInfo bufferInfo = ...;
    vkCreateBuffer(device, &bufferInfo, nullptr, &buffers[i]);
    // You can make dummy call to vkGetBufferMemoryRequirements here to silence validation layer warning.
    // Bind new buffer to new memory region. Data contained in it is already moved.
    VmaAllocationInfo allocInfo;
    vmaGetAllocationInfo(allocator, allocations[i], &allocInfo);
    vkBindBufferMemory(device, buffers[i], allocInfo.deviceMemory, allocInfo.offset);
    }
    }

    Setting VmaDefragmentationInfo2::pAllocationsChanged is optional. This output array tells whether particular allocation in VmaDefragmentationInfo2::pAllocations at the same index has been modified during defragmentation. You can pass null, but you then need to query every allocation passed to defragmentation for new parameters using vmaGetAllocationInfo() if you might need to recreate and rebind a buffer or image associated with it.

    +
    // Given following variables already initialized:
    VkDevice device;
    VmaAllocator allocator;
    std::vector<VkBuffer> buffers;
    std::vector<VmaAllocation> allocations;
    const uint32_t allocCount = (uint32_t)allocations.size();
    std::vector<VkBool32> allocationsChanged(allocCount);
    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = allocCount;
    defragInfo.pAllocations = allocations.data();
    defragInfo.pAllocationsChanged = allocationsChanged.data();
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE; // No limit.
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX; // No limit.
    VmaDefragmentationContext defragCtx;
    vmaDefragmentationBegin(allocator, &defragInfo, nullptr, &defragCtx);
    vmaDefragmentationEnd(allocator, defragCtx);
    for(uint32_t i = 0; i < allocCount; ++i)
    {
    if(allocationsChanged[i])
    {
    // Destroy buffer that is immutably bound to memory region which is no longer valid.
    vkDestroyBuffer(device, buffers[i], nullptr);
    // Create new buffer with same parameters.
    VkBufferCreateInfo bufferInfo = ...;
    vkCreateBuffer(device, &bufferInfo, nullptr, &buffers[i]);
    // You can make dummy call to vkGetBufferMemoryRequirements here to silence validation layer warning.
    // Bind new buffer to new memory region. Data contained in it is already moved.
    VmaAllocationInfo allocInfo;
    vmaGetAllocationInfo(allocator, allocations[i], &allocInfo);
    vmaBindBufferMemory(allocator, allocations[i], buffers[i]);
    }
    }

    Setting VmaDefragmentationInfo2::pAllocationsChanged is optional. This output array tells whether particular allocation in VmaDefragmentationInfo2::pAllocations at the same index has been modified during defragmentation. You can pass null, but you then need to query every allocation passed to defragmentation for new parameters using vmaGetAllocationInfo() if you might need to recreate and rebind a buffer or image associated with it.

    If you use Custom memory pools, you can fill VmaDefragmentationInfo2::poolCount and VmaDefragmentationInfo2::pPools instead of VmaDefragmentationInfo2::allocationCount and VmaDefragmentationInfo2::pAllocations to defragment all allocations in given pools. You cannot use VmaDefragmentationInfo2::pAllocationsChanged in that case. You can also combine both methods.

    Defragmenting GPU memory

    @@ -99,7 +99,7 @@ Defragmenting GPU memory
  • It issues vkCmdCopyBuffer() to passed command buffer.
  • Example:

    -
    // Given following variables already initialized:
    VkDevice device;
    VmaAllocator allocator;
    VkCommandBuffer commandBuffer;
    std::vector<VkBuffer> buffers;
    std::vector<VmaAllocation> allocations;
    const uint32_t allocCount = (uint32_t)allocations.size();
    std::vector<VkBool32> allocationsChanged(allocCount);
    VkCommandBufferBeginInfo cmdBufBeginInfo = ...;
    vkBeginCommandBuffer(commandBuffer, &cmdBufBeginInfo);
    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = allocCount;
    defragInfo.pAllocations = allocations.data();
    defragInfo.pAllocationsChanged = allocationsChanged.data();
    defragInfo.maxGpuBytesToMove = VK_WHOLE_SIZE; // Notice it's "GPU" this time.
    defragInfo.maxGpuAllocationsToMove = UINT32_MAX; // Notice it's "GPU" this time.
    defragInfo.commandBuffer = commandBuffer;
    VmaDefragmentationContext defragCtx;
    vmaDefragmentationBegin(allocator, &defragInfo, nullptr, &defragCtx);
    vkEndCommandBuffer(commandBuffer);
    // Submit commandBuffer.
    // Wait for a fence that ensures commandBuffer execution finished.
    vmaDefragmentationEnd(allocator, defragCtx);
    for(uint32_t i = 0; i < allocCount; ++i)
    {
    if(allocationsChanged[i])
    {
    // Destroy buffer that is immutably bound to memory region which is no longer valid.
    vkDestroyBuffer(device, buffers[i], nullptr);
    // Create new buffer with same parameters.
    VkBufferCreateInfo bufferInfo = ...;
    vkCreateBuffer(device, &bufferInfo, nullptr, &buffers[i]);
    // You can make dummy call to vkGetBufferMemoryRequirements here to silence validation layer warning.
    // Bind new buffer to new memory region. Data contained in it is already moved.
    VmaAllocationInfo allocInfo;
    vmaGetAllocationInfo(allocator, allocations[i], &allocInfo);
    vkBindBufferMemory(device, buffers[i], allocInfo.deviceMemory, allocInfo.offset);
    }
    }

    You can combine these two methods by specifying non-zero maxGpu* as well as maxCpu* parameters. The library automatically chooses best method to defragment each memory pool.

    +
    // Given following variables already initialized:
    VkDevice device;
    VmaAllocator allocator;
    VkCommandBuffer commandBuffer;
    std::vector<VkBuffer> buffers;
    std::vector<VmaAllocation> allocations;
    const uint32_t allocCount = (uint32_t)allocations.size();
    std::vector<VkBool32> allocationsChanged(allocCount);
    VkCommandBufferBeginInfo cmdBufBeginInfo = ...;
    vkBeginCommandBuffer(commandBuffer, &cmdBufBeginInfo);
    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = allocCount;
    defragInfo.pAllocations = allocations.data();
    defragInfo.pAllocationsChanged = allocationsChanged.data();
    defragInfo.maxGpuBytesToMove = VK_WHOLE_SIZE; // Notice it's "GPU" this time.
    defragInfo.maxGpuAllocationsToMove = UINT32_MAX; // Notice it's "GPU" this time.
    defragInfo.commandBuffer = commandBuffer;
    VmaDefragmentationContext defragCtx;
    vmaDefragmentationBegin(allocator, &defragInfo, nullptr, &defragCtx);
    vkEndCommandBuffer(commandBuffer);
    // Submit commandBuffer.
    // Wait for a fence that ensures commandBuffer execution finished.
    vmaDefragmentationEnd(allocator, defragCtx);
    for(uint32_t i = 0; i < allocCount; ++i)
    {
    if(allocationsChanged[i])
    {
    // Destroy buffer that is immutably bound to memory region which is no longer valid.
    vkDestroyBuffer(device, buffers[i], nullptr);
    // Create new buffer with same parameters.
    VkBufferCreateInfo bufferInfo = ...;
    vkCreateBuffer(device, &bufferInfo, nullptr, &buffers[i]);
    // You can make dummy call to vkGetBufferMemoryRequirements here to silence validation layer warning.
    // Bind new buffer to new memory region. Data contained in it is already moved.
    VmaAllocationInfo allocInfo;
    vmaGetAllocationInfo(allocator, allocations[i], &allocInfo);
    vmaBindBufferMemory(allocator, allocations[i], buffers[i]);
    }
    }

    You can combine these two methods by specifying non-zero maxGpu* as well as maxCpu* parameters. The library automatically chooses best method to defragment each memory pool.

    You may try not to block your entire program to wait until defragmentation finishes, but do it in the background, as long as you carefully fulfill requirements described in function vmaDefragmentationBegin().

    Additional notes

    diff --git a/docs/html/vk__mem__alloc_8h_source.html b/docs/html/vk__mem__alloc_8h_source.html index 8a8373c..00bd1a9 100644 --- a/docs/html/vk__mem__alloc_8h_source.html +++ b/docs/html/vk__mem__alloc_8h_source.html @@ -65,212 +65,212 @@ $(function() {
    vk_mem_alloc.h
    -Go to the documentation of this file.
    1 //
    2 // Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
    3 //
    4 // Permission is hereby granted, free of charge, to any person obtaining a copy
    5 // of this software and associated documentation files (the "Software"), to deal
    6 // in the Software without restriction, including without limitation the rights
    7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    8 // copies of the Software, and to permit persons to whom the Software is
    9 // furnished to do so, subject to the following conditions:
    10 //
    11 // The above copyright notice and this permission notice shall be included in
    12 // all copies or substantial portions of the Software.
    13 //
    14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    20 // THE SOFTWARE.
    21 //
    22 
    23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
    24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
    25 
    26 #ifdef __cplusplus
    27 extern "C" {
    28 #endif
    29 
    1677 /*
    1678 Define this macro to 0/1 to disable/enable support for recording functionality,
    1679 available through VmaAllocatorCreateInfo::pRecordSettings.
    1680 */
    1681 #ifndef VMA_RECORDING_ENABLED
    1682  #ifdef _WIN32
    1683  #define VMA_RECORDING_ENABLED 1
    1684  #else
    1685  #define VMA_RECORDING_ENABLED 0
    1686  #endif
    1687 #endif
    1688 
    1689 #ifndef NOMINMAX
    1690  #define NOMINMAX // For windows.h
    1691 #endif
    1692 
    1693 #ifndef VULKAN_H_
    1694  #include <vulkan/vulkan.h>
    1695 #endif
    1696 
    1697 #if VMA_RECORDING_ENABLED
    1698  #include <windows.h>
    1699 #endif
    1700 
    1701 #if !defined(VMA_DEDICATED_ALLOCATION)
    1702  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
    1703  #define VMA_DEDICATED_ALLOCATION 1
    1704  #else
    1705  #define VMA_DEDICATED_ALLOCATION 0
    1706  #endif
    1707 #endif
    1708 
    1718 VK_DEFINE_HANDLE(VmaAllocator)
    1719 
    1720 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    1722  VmaAllocator allocator,
    1723  uint32_t memoryType,
    1724  VkDeviceMemory memory,
    1725  VkDeviceSize size);
    1727 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    1728  VmaAllocator allocator,
    1729  uint32_t memoryType,
    1730  VkDeviceMemory memory,
    1731  VkDeviceSize size);
    1732 
    1746 
    1776 
    1779 typedef VkFlags VmaAllocatorCreateFlags;
    1780 
    1785 typedef struct VmaVulkanFunctions {
    1786  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    1787  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    1788  PFN_vkAllocateMemory vkAllocateMemory;
    1789  PFN_vkFreeMemory vkFreeMemory;
    1790  PFN_vkMapMemory vkMapMemory;
    1791  PFN_vkUnmapMemory vkUnmapMemory;
    1792  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
    1793  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
    1794  PFN_vkBindBufferMemory vkBindBufferMemory;
    1795  PFN_vkBindImageMemory vkBindImageMemory;
    1796  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    1797  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    1798  PFN_vkCreateBuffer vkCreateBuffer;
    1799  PFN_vkDestroyBuffer vkDestroyBuffer;
    1800  PFN_vkCreateImage vkCreateImage;
    1801  PFN_vkDestroyImage vkDestroyImage;
    1802  PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
    1803 #if VMA_DEDICATED_ALLOCATION
    1804  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    1805  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
    1806 #endif
    1808 
    1810 typedef enum VmaRecordFlagBits {
    1817 
    1820 typedef VkFlags VmaRecordFlags;
    1821 
    1823 typedef struct VmaRecordSettings
    1824 {
    1834  const char* pFilePath;
    1836 
    1839 {
    1843 
    1844  VkPhysicalDevice physicalDevice;
    1846 
    1847  VkDevice device;
    1849 
    1852 
    1853  const VkAllocationCallbacks* pAllocationCallbacks;
    1855 
    1895  const VkDeviceSize* pHeapSizeLimit;
    1916 
    1918 VkResult vmaCreateAllocator(
    1919  const VmaAllocatorCreateInfo* pCreateInfo,
    1920  VmaAllocator* pAllocator);
    1921 
    1923 void vmaDestroyAllocator(
    1924  VmaAllocator allocator);
    1925 
    1931  VmaAllocator allocator,
    1932  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
    1933 
    1939  VmaAllocator allocator,
    1940  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
    1941 
    1949  VmaAllocator allocator,
    1950  uint32_t memoryTypeIndex,
    1951  VkMemoryPropertyFlags* pFlags);
    1952 
    1962  VmaAllocator allocator,
    1963  uint32_t frameIndex);
    1964 
    1967 typedef struct VmaStatInfo
    1968 {
    1970  uint32_t blockCount;
    1976  VkDeviceSize usedBytes;
    1978  VkDeviceSize unusedBytes;
    1981 } VmaStatInfo;
    1982 
    1984 typedef struct VmaStats
    1985 {
    1986  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    1987  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    1989 } VmaStats;
    1990 
    1992 void vmaCalculateStats(
    1993  VmaAllocator allocator,
    1994  VmaStats* pStats);
    1995 
    1996 #ifndef VMA_STATS_STRING_ENABLED
    1997 #define VMA_STATS_STRING_ENABLED 1
    1998 #endif
    1999 
    2000 #if VMA_STATS_STRING_ENABLED
    2001 
    2003 
    2005 void vmaBuildStatsString(
    2006  VmaAllocator allocator,
    2007  char** ppStatsString,
    2008  VkBool32 detailedMap);
    2009 
    2010 void vmaFreeStatsString(
    2011  VmaAllocator allocator,
    2012  char* pStatsString);
    2013 
    2014 #endif // #if VMA_STATS_STRING_ENABLED
    2015 
    2024 VK_DEFINE_HANDLE(VmaPool)
    2025 
    2026 typedef enum VmaMemoryUsage
    2027 {
    2076 } VmaMemoryUsage;
    2077 
    2087 
    2148 
    2164 
    2174 
    2181 
    2185 
    2187 {
    2200  VkMemoryPropertyFlags requiredFlags;
    2205  VkMemoryPropertyFlags preferredFlags;
    2213  uint32_t memoryTypeBits;
    2226  void* pUserData;
    2228 
    2245 VkResult vmaFindMemoryTypeIndex(
    2246  VmaAllocator allocator,
    2247  uint32_t memoryTypeBits,
    2248  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2249  uint32_t* pMemoryTypeIndex);
    2250 
    2264  VmaAllocator allocator,
    2265  const VkBufferCreateInfo* pBufferCreateInfo,
    2266  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2267  uint32_t* pMemoryTypeIndex);
    2268 
    2282  VmaAllocator allocator,
    2283  const VkImageCreateInfo* pImageCreateInfo,
    2284  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2285  uint32_t* pMemoryTypeIndex);
    2286 
    2307 
    2324 
    2335 
    2341 
    2344 typedef VkFlags VmaPoolCreateFlags;
    2345 
    2348 typedef struct VmaPoolCreateInfo {
    2363  VkDeviceSize blockSize;
    2392 
    2395 typedef struct VmaPoolStats {
    2398  VkDeviceSize size;
    2401  VkDeviceSize unusedSize;
    2414  VkDeviceSize unusedRangeSizeMax;
    2417  size_t blockCount;
    2418 } VmaPoolStats;
    2419 
    2426 VkResult vmaCreatePool(
    2427  VmaAllocator allocator,
    2428  const VmaPoolCreateInfo* pCreateInfo,
    2429  VmaPool* pPool);
    2430 
    2433 void vmaDestroyPool(
    2434  VmaAllocator allocator,
    2435  VmaPool pool);
    2436 
    2443 void vmaGetPoolStats(
    2444  VmaAllocator allocator,
    2445  VmaPool pool,
    2446  VmaPoolStats* pPoolStats);
    2447 
    2455  VmaAllocator allocator,
    2456  VmaPool pool,
    2457  size_t* pLostAllocationCount);
    2458 
    2473 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
    2474 
    2499 VK_DEFINE_HANDLE(VmaAllocation)
    2500 
    2501 
    2503 typedef struct VmaAllocationInfo {
    2508  uint32_t memoryType;
    2517  VkDeviceMemory deviceMemory;
    2522  VkDeviceSize offset;
    2527  VkDeviceSize size;
    2541  void* pUserData;
    2543 
    2554 VkResult vmaAllocateMemory(
    2555  VmaAllocator allocator,
    2556  const VkMemoryRequirements* pVkMemoryRequirements,
    2557  const VmaAllocationCreateInfo* pCreateInfo,
    2558  VmaAllocation* pAllocation,
    2559  VmaAllocationInfo* pAllocationInfo);
    2560 
    2580 VkResult vmaAllocateMemoryPages(
    2581  VmaAllocator allocator,
    2582  const VkMemoryRequirements* pVkMemoryRequirements,
    2583  const VmaAllocationCreateInfo* pCreateInfo,
    2584  size_t allocationCount,
    2585  VmaAllocation* pAllocations,
    2586  VmaAllocationInfo* pAllocationInfo);
    2587 
    2595  VmaAllocator allocator,
    2596  VkBuffer buffer,
    2597  const VmaAllocationCreateInfo* pCreateInfo,
    2598  VmaAllocation* pAllocation,
    2599  VmaAllocationInfo* pAllocationInfo);
    2600 
    2602 VkResult vmaAllocateMemoryForImage(
    2603  VmaAllocator allocator,
    2604  VkImage image,
    2605  const VmaAllocationCreateInfo* pCreateInfo,
    2606  VmaAllocation* pAllocation,
    2607  VmaAllocationInfo* pAllocationInfo);
    2608 
    2613 void vmaFreeMemory(
    2614  VmaAllocator allocator,
    2615  VmaAllocation allocation);
    2616 
    2627 void vmaFreeMemoryPages(
    2628  VmaAllocator allocator,
    2629  size_t allocationCount,
    2630  VmaAllocation* pAllocations);
    2631 
    2652 VkResult vmaResizeAllocation(
    2653  VmaAllocator allocator,
    2654  VmaAllocation allocation,
    2655  VkDeviceSize newSize);
    2656 
    2674  VmaAllocator allocator,
    2675  VmaAllocation allocation,
    2676  VmaAllocationInfo* pAllocationInfo);
    2677 
    2692 VkBool32 vmaTouchAllocation(
    2693  VmaAllocator allocator,
    2694  VmaAllocation allocation);
    2695 
    2710  VmaAllocator allocator,
    2711  VmaAllocation allocation,
    2712  void* pUserData);
    2713 
    2725  VmaAllocator allocator,
    2726  VmaAllocation* pAllocation);
    2727 
    2762 VkResult vmaMapMemory(
    2763  VmaAllocator allocator,
    2764  VmaAllocation allocation,
    2765  void** ppData);
    2766 
    2771 void vmaUnmapMemory(
    2772  VmaAllocator allocator,
    2773  VmaAllocation allocation);
    2774 
    2791 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2792 
    2809 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2810 
    2827 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
    2828 
    2835 VK_DEFINE_HANDLE(VmaDefragmentationContext)
    2836 
    2837 typedef enum VmaDefragmentationFlagBits {
    2841 typedef VkFlags VmaDefragmentationFlags;
    2842 
    2847 typedef struct VmaDefragmentationInfo2 {
    2871  uint32_t poolCount;
    2892  VkDeviceSize maxCpuBytesToMove;
    2902  VkDeviceSize maxGpuBytesToMove;
    2916  VkCommandBuffer commandBuffer;
    2918 
    2923 typedef struct VmaDefragmentationInfo {
    2928  VkDeviceSize maxBytesToMove;
    2935 
    2937 typedef struct VmaDefragmentationStats {
    2939  VkDeviceSize bytesMoved;
    2941  VkDeviceSize bytesFreed;
    2947 
    2977 VkResult vmaDefragmentationBegin(
    2978  VmaAllocator allocator,
    2979  const VmaDefragmentationInfo2* pInfo,
    2980  VmaDefragmentationStats* pStats,
    2981  VmaDefragmentationContext *pContext);
    2982 
    2988 VkResult vmaDefragmentationEnd(
    2989  VmaAllocator allocator,
    2990  VmaDefragmentationContext context);
    2991 
    3032 VkResult vmaDefragment(
    3033  VmaAllocator allocator,
    3034  VmaAllocation* pAllocations,
    3035  size_t allocationCount,
    3036  VkBool32* pAllocationsChanged,
    3037  const VmaDefragmentationInfo *pDefragmentationInfo,
    3038  VmaDefragmentationStats* pDefragmentationStats);
    3039 
    3052 VkResult vmaBindBufferMemory(
    3053  VmaAllocator allocator,
    3054  VmaAllocation allocation,
    3055  VkBuffer buffer);
    3056 
    3069 VkResult vmaBindImageMemory(
    3070  VmaAllocator allocator,
    3071  VmaAllocation allocation,
    3072  VkImage image);
    3073 
    3100 VkResult vmaCreateBuffer(
    3101  VmaAllocator allocator,
    3102  const VkBufferCreateInfo* pBufferCreateInfo,
    3103  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    3104  VkBuffer* pBuffer,
    3105  VmaAllocation* pAllocation,
    3106  VmaAllocationInfo* pAllocationInfo);
    3107 
    3119 void vmaDestroyBuffer(
    3120  VmaAllocator allocator,
    3121  VkBuffer buffer,
    3122  VmaAllocation allocation);
    3123 
    3125 VkResult vmaCreateImage(
    3126  VmaAllocator allocator,
    3127  const VkImageCreateInfo* pImageCreateInfo,
    3128  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    3129  VkImage* pImage,
    3130  VmaAllocation* pAllocation,
    3131  VmaAllocationInfo* pAllocationInfo);
    3132 
    3144 void vmaDestroyImage(
    3145  VmaAllocator allocator,
    3146  VkImage image,
    3147  VmaAllocation allocation);
    3148 
    3149 #ifdef __cplusplus
    3150 }
    3151 #endif
    3152 
    3153 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
    3154 
    3155 // For Visual Studio IntelliSense.
    3156 #if defined(__cplusplus) && defined(__INTELLISENSE__)
    3157 #define VMA_IMPLEMENTATION
    3158 #endif
    3159 
    3160 #ifdef VMA_IMPLEMENTATION
    3161 #undef VMA_IMPLEMENTATION
    3162 
    3163 #include <cstdint>
    3164 #include <cstdlib>
    3165 #include <cstring>
    3166 
    3167 /*******************************************************************************
    3168 CONFIGURATION SECTION
    3169 
    3170 Define some of these macros before each #include of this header or change them
    3171 here if you need other than default behavior depending on your environment.
    3172 */
    3173 
    3174 /*
    3175 Define this macro to 1 to make the library fetch pointers to Vulkan functions
    3176 internally, like:
    3177 
    3178  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    3179 
    3180 Define to 0 if you are going to provide your own pointers to Vulkan functions via
    3181 VmaAllocatorCreateInfo::pVulkanFunctions.
    3182 */
    3183 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
    3184 #define VMA_STATIC_VULKAN_FUNCTIONS 1
    3185 #endif
    3186 
    3187 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
    3188 //#define VMA_USE_STL_CONTAINERS 1
    3189 
    3190 /* Set this macro to 1 to make the library including and using STL containers:
    3191 std::pair, std::vector, std::list, std::unordered_map.
    3192 
    3193 Set it to 0 or undefined to make the library using its own implementation of
    3194 the containers.
    3195 */
    3196 #if VMA_USE_STL_CONTAINERS
    3197  #define VMA_USE_STL_VECTOR 1
    3198  #define VMA_USE_STL_UNORDERED_MAP 1
    3199  #define VMA_USE_STL_LIST 1
    3200 #endif
    3201 
    3202 #ifndef VMA_USE_STL_SHARED_MUTEX
    3203  // Compiler conforms to C++17.
    3204  #if __cplusplus >= 201703L
    3205  #define VMA_USE_STL_SHARED_MUTEX 1
    3206  // Visual studio defines __cplusplus properly only when passed additional parameter: /Zc:__cplusplus
    3207  // Otherwise it's always 199711L, even though shared_mutex has worked since Visual Studio 2015 Update 2.
    3208  // See: https://blogs.msdn.microsoft.com/vcblog/2018/04/09/msvc-now-correctly-reports-__cplusplus/
    3209  #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
    3210  #define VMA_USE_STL_SHARED_MUTEX 1
    3211  #else
    3212  #define VMA_USE_STL_SHARED_MUTEX 0
    3213  #endif
    3214 #endif
    3215 
    3216 /*
    3217 THESE INCLUDES ARE NOT ENABLED BY DEFAULT.
    3218 Library has its own container implementation.
    3219 */
    3220 #if VMA_USE_STL_VECTOR
    3221  #include <vector>
    3222 #endif
    3223 
    3224 #if VMA_USE_STL_UNORDERED_MAP
    3225  #include <unordered_map>
    3226 #endif
    3227 
    3228 #if VMA_USE_STL_LIST
    3229  #include <list>
    3230 #endif
    3231 
    3232 /*
    3233 Following headers are used in this CONFIGURATION section only, so feel free to
    3234 remove them if not needed.
    3235 */
    3236 #include <cassert> // for assert
    3237 #include <algorithm> // for min, max
    3238 #include <mutex>
    3239 
    3240 #ifndef VMA_NULL
    3241  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    3242  #define VMA_NULL nullptr
    3243 #endif
    3244 
    3245 #if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
    3246 #include <cstdlib>
    3247 void *aligned_alloc(size_t alignment, size_t size)
    3248 {
    3249  // alignment must be >= sizeof(void*)
    3250  if(alignment < sizeof(void*))
    3251  {
    3252  alignment = sizeof(void*);
    3253  }
    3254 
    3255  return memalign(alignment, size);
    3256 }
    3257 #elif defined(__APPLE__) || defined(__ANDROID__)
    3258 #include <cstdlib>
    3259 void *aligned_alloc(size_t alignment, size_t size)
    3260 {
    3261  // alignment must be >= sizeof(void*)
    3262  if(alignment < sizeof(void*))
    3263  {
    3264  alignment = sizeof(void*);
    3265  }
    3266 
    3267  void *pointer;
    3268  if(posix_memalign(&pointer, alignment, size) == 0)
    3269  return pointer;
    3270  return VMA_NULL;
    3271 }
    3272 #endif
    3273 
    3274 // If your compiler is not compatible with C++11 and definition of
    3275 // aligned_alloc() function is missing, uncommenting the following line may help:
    3276 
    3277 //#include <malloc.h>
    3278 
    3279 // Normal assert to check for programmer's errors, especially in Debug configuration.
    3280 #ifndef VMA_ASSERT
    3281  #ifdef _DEBUG
    3282  #define VMA_ASSERT(expr) assert(expr)
    3283  #else
    3284  #define VMA_ASSERT(expr)
    3285  #endif
    3286 #endif
    3287 
    3288 // Assert that will be called very often, like inside data structures e.g. operator[].
    3289 // Making it non-empty can make program slow.
    3290 #ifndef VMA_HEAVY_ASSERT
    3291  #ifdef _DEBUG
    3292  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    3293  #else
    3294  #define VMA_HEAVY_ASSERT(expr)
    3295  #endif
    3296 #endif
    3297 
    3298 #ifndef VMA_ALIGN_OF
    3299  #define VMA_ALIGN_OF(type) (__alignof(type))
    3300 #endif
    3301 
    3302 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
    3303  #if defined(_WIN32)
    3304  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    3305  #else
    3306  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
    3307  #endif
    3308 #endif
    3309 
    3310 #ifndef VMA_SYSTEM_FREE
    3311  #if defined(_WIN32)
    3312  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    3313  #else
    3314  #define VMA_SYSTEM_FREE(ptr) free(ptr)
    3315  #endif
    3316 #endif
    3317 
    3318 #ifndef VMA_MIN
    3319  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
    3320 #endif
    3321 
    3322 #ifndef VMA_MAX
    3323  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
    3324 #endif
    3325 
    3326 #ifndef VMA_SWAP
    3327  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
    3328 #endif
    3329 
    3330 #ifndef VMA_SORT
    3331  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
    3332 #endif
    3333 
    3334 #ifndef VMA_DEBUG_LOG
    3335  #define VMA_DEBUG_LOG(format, ...)
    3336  /*
    3337  #define VMA_DEBUG_LOG(format, ...) do { \
    3338  printf(format, __VA_ARGS__); \
    3339  printf("\n"); \
    3340  } while(false)
    3341  */
    3342 #endif
    3343 
    3344 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
    3345 #if VMA_STATS_STRING_ENABLED
    3346  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    3347  {
    3348  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    3349  }
    3350  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    3351  {
    3352  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    3353  }
    3354  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    3355  {
    3356  snprintf(outStr, strLen, "%p", ptr);
    3357  }
    3358 #endif
    3359 
    3360 #ifndef VMA_MUTEX
    3361  class VmaMutex
    3362  {
    3363  public:
    3364  void Lock() { m_Mutex.lock(); }
    3365  void Unlock() { m_Mutex.unlock(); }
    3366  private:
    3367  std::mutex m_Mutex;
    3368  };
    3369  #define VMA_MUTEX VmaMutex
    3370 #endif
    3371 
    3372 // Read-write mutex, where "read" is shared access, "write" is exclusive access.
    3373 #ifndef VMA_RW_MUTEX
    3374  #if VMA_USE_STL_SHARED_MUTEX
    3375  // Use std::shared_mutex from C++17.
    3376  #include <shared_mutex>
    3377  class VmaRWMutex
    3378  {
    3379  public:
    3380  void LockRead() { m_Mutex.lock_shared(); }
    3381  void UnlockRead() { m_Mutex.unlock_shared(); }
    3382  void LockWrite() { m_Mutex.lock(); }
    3383  void UnlockWrite() { m_Mutex.unlock(); }
    3384  private:
    3385  std::shared_mutex m_Mutex;
    3386  };
    3387  #define VMA_RW_MUTEX VmaRWMutex
    3388  #elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600
    3389  // Use SRWLOCK from WinAPI.
    3390  // Minimum supported client = Windows Vista, server = Windows Server 2008.
// Read-write mutex backed by WinAPI SRWLOCK.
// SRWLOCK needs explicit initialization but no destruction.
class VmaRWMutex
{
public:
    VmaRWMutex() { InitializeSRWLock(&m_Lock); }
    void LockRead() { AcquireSRWLockShared(&m_Lock); }
    void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
    void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
    void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
private:
    SRWLOCK m_Lock;
};
    3402  #define VMA_RW_MUTEX VmaRWMutex
    3403  #else
    3404  // Less efficient fallback: Use normal mutex.
// Fallback read-write mutex built on a plain VMA_MUTEX: readers and writers
// all take the same exclusive lock, so it is correct but not concurrent.
class VmaRWMutex
{
public:
    void LockRead() { m_Mutex.Lock(); }
    void UnlockRead() { m_Mutex.Unlock(); }
    void LockWrite() { m_Mutex.Lock(); }
    void UnlockWrite() { m_Mutex.Unlock(); }
private:
    VMA_MUTEX m_Mutex;
};
    3415  #define VMA_RW_MUTEX VmaRWMutex
    3416  #endif // #if VMA_USE_STL_SHARED_MUTEX
    3417 #endif // #ifndef VMA_RW_MUTEX
    3418 
    3419 /*
    3420 If providing your own implementation, you need to implement a subset of std::atomic:
    3421 
    3422 - Constructor(uint32_t desired)
    3423 - uint32_t load() const
    3424 - void store(uint32_t desired)
    3425 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
    3426 */
    3427 #ifndef VMA_ATOMIC_UINT32
    3428  #include <atomic>
    3429  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
    3430 #endif
    3431 
    3432 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    3433 
    3437  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
    3438 #endif
    3439 
    3440 #ifndef VMA_DEBUG_ALIGNMENT
    3441 
    3445  #define VMA_DEBUG_ALIGNMENT (1)
    3446 #endif
    3447 
    3448 #ifndef VMA_DEBUG_MARGIN
    3449 
    3453  #define VMA_DEBUG_MARGIN (0)
    3454 #endif
    3455 
    3456 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
    3457 
    3461  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
    3462 #endif
    3463 
    3464 #ifndef VMA_DEBUG_DETECT_CORRUPTION
    3465 
    3470  #define VMA_DEBUG_DETECT_CORRUPTION (0)
    3471 #endif
    3472 
    3473 #ifndef VMA_DEBUG_GLOBAL_MUTEX
    3474 
    3478  #define VMA_DEBUG_GLOBAL_MUTEX (0)
    3479 #endif
    3480 
    3481 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    3482 
    3486  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
    3487 #endif
    3488 
    3489 #ifndef VMA_SMALL_HEAP_MAX_SIZE
    3490  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
    3492 #endif
    3493 
    3494 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    3495  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
    3497 #endif
    3498 
    3499 #ifndef VMA_CLASS_NO_COPY
    3500  #define VMA_CLASS_NO_COPY(className) \
    3501  private: \
    3502  className(const className&) = delete; \
    3503  className& operator=(const className&) = delete;
    3504 #endif
    3505 
// Sentinel frame index; from the name, marks an allocation whose data was lost.
// NOTE(review): exact semantics are established where this is compared — confirm at usage site.
static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;

// Magic value written into debug margins around allocations and later checked
// by VmaValidateMagicValue to detect out-of-bounds writes.
// Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;

// Byte patterns used to fill allocation memory on creation and destruction.
// NOTE(review): presumably active when VMA_DEBUG_INITIALIZE_ALLOCATIONS is
// enabled — the fill code is outside this chunk; confirm there.
static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;

/*******************************************************************************
END OF CONFIGURATION
*/

// Internal strategy flag bit space, placed above the public strategy flags.
// NOTE(review): usage not visible in this chunk — confirm at the usage site.
static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;

// VkAllocationCallbacks with all members null — used where a callbacks struct
// is required but default (system) allocation behavior is desired.
static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
    3522 
    3523 // Returns number of bits set to 1 in (v).
    3524 static inline uint32_t VmaCountBitsSet(uint32_t v)
    3525 {
    3526  uint32_t c = v - ((v >> 1) & 0x55555555);
    3527  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    3528  c = ((c >> 4) + c) & 0x0F0F0F0F;
    3529  c = ((c >> 8) + c) & 0x00FF00FF;
    3530  c = ((c >> 16) + c) & 0x0000FFFF;
    3531  return c;
    3532 }
    3533 
    3534 // Aligns given value up to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 16.
    3535 // Use types like uint32_t, uint64_t as T.
    3536 template <typename T>
    3537 static inline T VmaAlignUp(T val, T align)
    3538 {
    3539  return (val + align - 1) / align * align;
    3540 }
// Aligns given value down to nearest multiply of align value. For example: VmaAlignDown(11, 8) = 8.
// (The example previously said VmaAlignUp — fixed to name this function.)
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignDown(T val, T align)
{
    return val / align * align;
}
    3548 
    3549 // Division with mathematical rounding to nearest number.
    3550 template <typename T>
    3551 static inline T VmaRoundDiv(T x, T y)
    3552 {
    3553  return (x + (y / (T)2)) / y;
    3554 }
    3555 
    3556 /*
    3557 Returns true if given number is a power of two.
    3558 T must be unsigned integer number or signed integer but always nonnegative.
    3559 For 0 returns true.
    3560 */
    3561 template <typename T>
    3562 inline bool VmaIsPow2(T x)
    3563 {
    3564  return (x & (x-1)) == 0;
    3565 }
    3566 
    3567 // Returns smallest power of 2 greater or equal to v.
    3568 static inline uint32_t VmaNextPow2(uint32_t v)
    3569 {
    3570  v--;
    3571  v |= v >> 1;
    3572  v |= v >> 2;
    3573  v |= v >> 4;
    3574  v |= v >> 8;
    3575  v |= v >> 16;
    3576  v++;
    3577  return v;
    3578 }
    3579 static inline uint64_t VmaNextPow2(uint64_t v)
    3580 {
    3581  v--;
    3582  v |= v >> 1;
    3583  v |= v >> 2;
    3584  v |= v >> 4;
    3585  v |= v >> 8;
    3586  v |= v >> 16;
    3587  v |= v >> 32;
    3588  v++;
    3589  return v;
    3590 }
    3591 
    3592 // Returns largest power of 2 less or equal to v.
    3593 static inline uint32_t VmaPrevPow2(uint32_t v)
    3594 {
    3595  v |= v >> 1;
    3596  v |= v >> 2;
    3597  v |= v >> 4;
    3598  v |= v >> 8;
    3599  v |= v >> 16;
    3600  v = v ^ (v >> 1);
    3601  return v;
    3602 }
    3603 static inline uint64_t VmaPrevPow2(uint64_t v)
    3604 {
    3605  v |= v >> 1;
    3606  v |= v >> 2;
    3607  v |= v >> 4;
    3608  v |= v >> 8;
    3609  v |= v >> 16;
    3610  v |= v >> 32;
    3611  v = v ^ (v >> 1);
    3612  return v;
    3613 }
    3614 
    3615 static inline bool VmaStrIsEmpty(const char* pStr)
    3616 {
    3617  return pStr == VMA_NULL || *pStr == '\0';
    3618 }
    3619 
    3620 #if VMA_STATS_STRING_ENABLED
    3621 
    3622 static const char* VmaAlgorithmToStr(uint32_t algorithm)
    3623 {
    3624  switch(algorithm)
    3625  {
    3627  return "Linear";
    3629  return "Buddy";
    3630  case 0:
    3631  return "Default";
    3632  default:
    3633  VMA_ASSERT(0);
    3634  return "";
    3635  }
    3636 }
    3637 
    3638 #endif // #if VMA_STATS_STRING_ENABLED
    3639 
    3640 #ifndef VMA_SORT
    3641 
// Partition step of the quicksort fallback used by VMA_SORT.
// Lomuto scheme: the pivot is *(end - 1); elements for which
// cmp(elem, pivot) is true are moved in front of it. Returns an iterator
// to the pivot's final position.
template<typename Iterator, typename Compare>
Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
{
    Iterator centerValue = end; --centerValue; // Pivot = last element of the range.
    Iterator insertIndex = beg; // Destination for the next element ordered before the pivot.
    for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    {
        if(cmp(*memTypeIndex, *centerValue))
        {
            if(insertIndex != memTypeIndex)
            {
                VMA_SWAP(*memTypeIndex, *insertIndex);
            }
            ++insertIndex;
        }
    }
    // Put the pivot between the two partitions.
    if(insertIndex != centerValue)
    {
        VMA_SWAP(*insertIndex, *centerValue);
    }
    return insertIndex;
}
    3664 
// Recursive quicksort over [beg, end), used as the default VMA_SORT
// implementation. cmp(a, b) must return true if a orders before b.
template<typename Iterator, typename Compare>
void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
{
    if(beg < end)
    {
        Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
        VmaQuickSort<Iterator, Compare>(beg, it, cmp);      // Sort elements before the pivot.
        VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);  // Sort elements after the pivot.
    }
}
    3675 
    3676 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
    3677 
    3678 #endif // #ifndef VMA_SORT
    3679 
/*
Returns true if two memory blocks occupy overlapping pages.
ResourceA must be in less memory offset than ResourceB.

Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".

NOTE(review): the masking below assumes pageSize is a power of 2 (the Vulkan
spec guarantees this for bufferImageGranularity), but the assert only checks
pageSize > 0 — confirm callers never pass a non-pow2 value.
*/
static inline bool VmaBlocksOnSamePage(
    VkDeviceSize resourceAOffset,
    VkDeviceSize resourceASize,
    VkDeviceSize resourceBOffset,
    VkDeviceSize pageSize)
{
    VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1; // Last byte of A.
    VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);  // Page containing A's last byte.
    VkDeviceSize resourceBStart = resourceBOffset;
    VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1); // Page containing B's first byte.
    return resourceAEndPage == resourceBStartPage;
}
    3700 
// Kind of resource occupying a suballocation. Used by
// VmaIsBufferImageGranularityConflict to decide whether two neighboring
// suballocations must be separated by bufferImageGranularity.
enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0,          // Region not in use.
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,       // Resource kind unknown — treated conservatively.
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3, // Image with unknown tiling.
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};
    3711 
/*
Returns true if given suballocation types could conflict and must respect
VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
or linear image and another one is optimal image. If type is unknown, behave
conservatively.
*/
static inline bool VmaIsBufferImageGranularityConflict(
    VmaSuballocationType suballocType1,
    VmaSuballocationType suballocType2)
{
    // Order the pair so only type1 <= type2 combinations need handling below.
    if(suballocType1 > suballocType2)
    {
        VMA_SWAP(suballocType1, suballocType2);
    }

    switch(suballocType1)
    {
    case VMA_SUBALLOCATION_TYPE_FREE:
        // Free space conflicts with nothing.
        return false;
    case VMA_SUBALLOCATION_TYPE_UNKNOWN:
        // Unknown resource: assume the worst.
        return true;
    case VMA_SUBALLOCATION_TYPE_BUFFER:
        // Buffer conflicts with optimal images (and images of unknown tiling).
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
        // Unknown tiling: conflicts with anything that might be the other tiling.
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
        // Optimal next to optimal is fine.
        return false;
    default:
        VMA_ASSERT(0);
        return true;
    }
}
    3752 
// Fills the VMA_DEBUG_MARGIN bytes at (pData + offset) with the
// corruption-detection magic value, to be checked later by
// VmaValidateMagicValue. Compiles to a no-op unless both VMA_DEBUG_MARGIN
// and VMA_DEBUG_DETECT_CORRUPTION are enabled.
static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
{
#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
    uint32_t* pDst = (uint32_t*)((char*)pData + offset);
    // Margin is written in whole uint32_t units.
    const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    for(size_t i = 0; i < numberCount; ++i, ++pDst)
    {
        *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
    }
#else
    // no-op
#endif
}
    3766 
// Verifies the margin at (pData + offset) still contains the magic value
// written by VmaWriteMagicValue. Returns false if any word differs
// (i.e. memory corruption was detected). Always returns true when
// corruption detection is compiled out.
static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
{
#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
    const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
    const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    for(size_t i = 0; i < numberCount; ++i, ++pSrc)
    {
        if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
        {
            return false;
        }
    }
#endif
    return true;
}
    3782 
/*
Fills structure with parameters of an example buffer to be used for transfers
during GPU memory defragmentation.
The buffer is usable as both transfer source and destination; its size is
only an example value (the default large heap block size).
*/
static void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo& outBufCreateInfo)
{
    memset(&outBufCreateInfo, 0, sizeof(outBufCreateInfo));
    outBufCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    outBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
    outBufCreateInfo.size = (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE; // Example size.
}
    3794 
// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
// When useMutex is false the lock is skipped entirely (used to honor the
// library's "no internal synchronization" mode).
struct VmaMutexLock
{
    VMA_CLASS_NO_COPY(VmaMutexLock)
public:
    VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    { if(m_pMutex) { m_pMutex->Lock(); } }
    ~VmaMutexLock()
    { if(m_pMutex) { m_pMutex->Unlock(); } }
private:
    VMA_MUTEX* m_pMutex; // Null when locking is disabled.
};
    3808 
// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
// When useMutex is false the lock is skipped entirely.
struct VmaMutexLockRead
{
    VMA_CLASS_NO_COPY(VmaMutexLockRead)
public:
    VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    { if(m_pMutex) { m_pMutex->LockRead(); } }
    ~VmaMutexLockRead() { if(m_pMutex) { m_pMutex->UnlockRead(); } }
private:
    VMA_RW_MUTEX* m_pMutex; // Null when locking is disabled.
};
    3821 
// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
// When useMutex is false the lock is skipped entirely.
struct VmaMutexLockWrite
{
    VMA_CLASS_NO_COPY(VmaMutexLockWrite)
public:
    VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    { if(m_pMutex) { m_pMutex->LockWrite(); } }
    ~VmaMutexLockWrite() { if(m_pMutex) { m_pMutex->UnlockWrite(); } }
private:
    VMA_RW_MUTEX* m_pMutex; // Null when locking is disabled.
};
    3834 
    3835 #if VMA_DEBUG_GLOBAL_MUTEX
    3836  static VMA_MUTEX gDebugGlobalMutex;
    3837  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
    3838 #else
    3839  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
    3840 #endif
    3841 
    3842 // Minimum size of a free suballocation to register it in the free suballocation collection.
    3843 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
    3844 
    3845 /*
    3846 Performs binary search and returns iterator to first element that is greater or
    3847 equal to (key), according to comparison (cmp).
    3848 
    3849 Cmp should return true if first argument is less than second argument.
    3850 
    3851 Returned value is the found element, if present in the collection or place where
    3852 new element with value (key) should be inserted.
    3853 */
    3854 template <typename CmpLess, typename IterT, typename KeyT>
    3855 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, const CmpLess& cmp)
    3856 {
    3857  size_t down = 0, up = (end - beg);
    3858  while(down < up)
    3859  {
    3860  const size_t mid = (down + up) / 2;
    3861  if(cmp(*(beg+mid), key))
    3862  {
    3863  down = mid + 1;
    3864  }
    3865  else
    3866  {
    3867  up = mid;
    3868  }
    3869  }
    3870  return beg + down;
    3871 }
    3872 
    3873 template<typename CmpLess, typename IterT, typename KeyT>
    3874 IterT VmaBinaryFindSorted(const IterT& beg, const IterT& end, const KeyT& value, const CmpLess& cmp)
    3875 {
    3876  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
    3877  beg, end, value, cmp);
    3878  if(it == end ||
    3879  (!cmp(*it, value) && !cmp(value, *it)))
    3880  {
    3881  return it;
    3882  }
    3883  return end;
    3884 }
    3885 
    3886 /*
    3887 Returns true if all pointers in the array are not-null and unique.
    3888 Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
    3889 T must be pointer type, e.g. VmaAllocation, VmaPool.
    3890 */
    3891 template<typename T>
    3892 static bool VmaValidatePointerArray(uint32_t count, const T* arr)
    3893 {
    3894  for(uint32_t i = 0; i < count; ++i)
    3895  {
    3896  const T iPtr = arr[i];
    3897  if(iPtr == VMA_NULL)
    3898  {
    3899  return false;
    3900  }
    3901  for(uint32_t j = i + 1; j < count; ++j)
    3902  {
    3903  if(iPtr == arr[j])
    3904  {
    3905  return false;
    3906  }
    3907  }
    3908  }
    3909  return true;
    3910 }
    3911 
    3913 // Memory allocation
    3914 
// Allocates (size) bytes aligned to (alignment). Uses the user-provided
// Vulkan-style allocation callback when one is set; otherwise falls back to
// the system aligned-allocation macro. Memory must be released with VmaFree.
static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    {
        return (*pAllocationCallbacks->pfnAllocation)(
            pAllocationCallbacks->pUserData,
            size,
            alignment,
            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    }
    else
    {
        return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    }
}
    3931 
// Frees memory obtained from VmaMalloc, dispatching to the user's free
// callback when one is set, otherwise to the system free macro.
// Must be called with the same callbacks that were used for allocation.
static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnFree != VMA_NULL))
    {
        (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    }
    else
    {
        VMA_SYSTEM_FREE(ptr);
    }
}
    3944 
// Allocates raw, suitably aligned storage for one T.
// No constructor is run — pair with vma_new for construction.
template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}
    3950 
// Allocates raw, suitably aligned storage for (count) objects of type T.
// No constructors are run — pair with vma_new_array for construction.
// NOTE(review): sizeof(T) * count is not checked for overflow; callers are
// trusted to pass sane counts.
template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}
    3956 
    3957 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
    3958 
    3959 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
    3960 
// Destroys and frees an object created with vma_new.
// NOTE: unlike vma_delete_array, ptr must not be null — the destructor is
// invoked unconditionally.
template<typename T>
static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
{
    ptr->~T();
    VmaFree(pAllocationCallbacks, ptr);
}
    3967 
// Destroys and frees an array created with vma_new_array.
// Elements are destroyed in reverse order. Null ptr is tolerated as a no-op.
template<typename T>
static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
{
    if(ptr != VMA_NULL)
    {
        for(size_t i = count; i--; )
        {
            ptr[i].~T();
        }
        VmaFree(pAllocationCallbacks, ptr);
    }
}
    3980 
// STL-compatible allocator that routes all allocations through the
// VkAllocationCallbacks, so internal containers honor the callbacks
// the user passed to the library.
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    // Rebinding constructor required by the Allocator concept.
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    // Allocators compare equal iff they use the same callbacks pointer.
    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};
    4008 
    4009 #if VMA_USE_STL_VECTOR
    4010 
    4011 #define VmaVector std::vector
    4012 
    4013 template<typename T, typename allocatorT>
    4014 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
    4015 {
    4016  vec.insert(vec.begin() + index, item);
    4017 }
    4018 
    4019 template<typename T, typename allocatorT>
    4020 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
    4021 {
    4022  vec.erase(vec.begin() + index);
    4023 }
    4024 
    4025 #else // #if VMA_USE_STL_VECTOR
    4026 
    4027 /* Class with interface compatible with subset of std::vector.
    4028 T must be POD because constructors and destructors are not called and memcpy is
    4029 used for these objects. */
    4030 template<typename T, typename AllocatorT>
    4031 class VmaVector
    4032 {
    4033 public:
    4034  typedef T value_type;
    4035 
    4036  VmaVector(const AllocatorT& allocator) :
    4037  m_Allocator(allocator),
    4038  m_pArray(VMA_NULL),
    4039  m_Count(0),
    4040  m_Capacity(0)
    4041  {
    4042  }
    4043 
    4044  VmaVector(size_t count, const AllocatorT& allocator) :
    4045  m_Allocator(allocator),
    4046  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
    4047  m_Count(count),
    4048  m_Capacity(count)
    4049  {
    4050  }
    4051 
    4052  VmaVector(const VmaVector<T, AllocatorT>& src) :
    4053  m_Allocator(src.m_Allocator),
    4054  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
    4055  m_Count(src.m_Count),
    4056  m_Capacity(src.m_Count)
    4057  {
    4058  if(m_Count != 0)
    4059  {
    4060  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
    4061  }
    4062  }
    4063 
    4064  ~VmaVector()
    4065  {
    4066  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    4067  }
    4068 
    4069  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    4070  {
    4071  if(&rhs != this)
    4072  {
    4073  resize(rhs.m_Count);
    4074  if(m_Count != 0)
    4075  {
    4076  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
    4077  }
    4078  }
    4079  return *this;
    4080  }
    4081 
    4082  bool empty() const { return m_Count == 0; }
    4083  size_t size() const { return m_Count; }
    4084  T* data() { return m_pArray; }
    4085  const T* data() const { return m_pArray; }
    4086 
    4087  T& operator[](size_t index)
    4088  {
    4089  VMA_HEAVY_ASSERT(index < m_Count);
    4090  return m_pArray[index];
    4091  }
    4092  const T& operator[](size_t index) const
    4093  {
    4094  VMA_HEAVY_ASSERT(index < m_Count);
    4095  return m_pArray[index];
    4096  }
    4097 
    4098  T& front()
    4099  {
    4100  VMA_HEAVY_ASSERT(m_Count > 0);
    4101  return m_pArray[0];
    4102  }
    4103  const T& front() const
    4104  {
    4105  VMA_HEAVY_ASSERT(m_Count > 0);
    4106  return m_pArray[0];
    4107  }
    4108  T& back()
    4109  {
    4110  VMA_HEAVY_ASSERT(m_Count > 0);
    4111  return m_pArray[m_Count - 1];
    4112  }
    4113  const T& back() const
    4114  {
    4115  VMA_HEAVY_ASSERT(m_Count > 0);
    4116  return m_pArray[m_Count - 1];
    4117  }
    4118 
    4119  void reserve(size_t newCapacity, bool freeMemory = false)
    4120  {
    4121  newCapacity = VMA_MAX(newCapacity, m_Count);
    4122 
    4123  if((newCapacity < m_Capacity) && !freeMemory)
    4124  {
    4125  newCapacity = m_Capacity;
    4126  }
    4127 
    4128  if(newCapacity != m_Capacity)
    4129  {
    4130  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
    4131  if(m_Count != 0)
    4132  {
    4133  memcpy(newArray, m_pArray, m_Count * sizeof(T));
    4134  }
    4135  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    4136  m_Capacity = newCapacity;
    4137  m_pArray = newArray;
    4138  }
    4139  }
    4140 
    4141  void resize(size_t newCount, bool freeMemory = false)
    4142  {
    4143  size_t newCapacity = m_Capacity;
    4144  if(newCount > m_Capacity)
    4145  {
    4146  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
    4147  }
    4148  else if(freeMemory)
    4149  {
    4150  newCapacity = newCount;
    4151  }
    4152 
    4153  if(newCapacity != m_Capacity)
    4154  {
    4155  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
    4156  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
    4157  if(elementsToCopy != 0)
    4158  {
    4159  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
    4160  }
    4161  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    4162  m_Capacity = newCapacity;
    4163  m_pArray = newArray;
    4164  }
    4165 
    4166  m_Count = newCount;
    4167  }
    4168 
    4169  void clear(bool freeMemory = false)
    4170  {
    4171  resize(0, freeMemory);
    4172  }
    4173 
    4174  void insert(size_t index, const T& src)
    4175  {
    4176  VMA_HEAVY_ASSERT(index <= m_Count);
    4177  const size_t oldCount = size();
    4178  resize(oldCount + 1);
    4179  if(index < oldCount)
    4180  {
    4181  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
    4182  }
    4183  m_pArray[index] = src;
    4184  }
    4185 
    4186  void remove(size_t index)
    4187  {
    4188  VMA_HEAVY_ASSERT(index < m_Count);
    4189  const size_t oldCount = size();
    4190  if(index < oldCount - 1)
    4191  {
    4192  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
    4193  }
    4194  resize(oldCount - 1);
    4195  }
    4196 
    4197  void push_back(const T& src)
    4198  {
    4199  const size_t newIndex = size();
    4200  resize(newIndex + 1);
    4201  m_pArray[newIndex] = src;
    4202  }
    4203 
    4204  void pop_back()
    4205  {
    4206  VMA_HEAVY_ASSERT(m_Count > 0);
    4207  resize(size() - 1);
    4208  }
    4209 
    4210  void push_front(const T& src)
    4211  {
    4212  insert(0, src);
    4213  }
    4214 
    4215  void pop_front()
    4216  {
    4217  VMA_HEAVY_ASSERT(m_Count > 0);
    4218  remove(0);
    4219  }
    4220 
    4221  typedef T* iterator;
    4222 
    4223  iterator begin() { return m_pArray; }
    4224  iterator end() { return m_pArray + m_Count; }
    4225 
    4226 private:
    4227  AllocatorT m_Allocator;
    4228  T* m_pArray;
    4229  size_t m_Count;
    4230  size_t m_Capacity;
    4231 };
    4232 
// Inserts (item) at position (index) of a VmaVector.
// Shared free-function shim matching the std::vector variant above.
template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}
    4238 
// Removes the element at position (index) from a VmaVector.
// Shared free-function shim matching the std::vector variant above.
template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}
    4244 
    4245 #endif // #if VMA_USE_STL_VECTOR
    4246 
// Inserts (value) into a vector kept sorted according to CmpLess,
// using binary search to find the position. Returns the insertion index.
template<typename CmpLess, typename VectorT>
size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        vector.data(),
        vector.data() + vector.size(),
        value,
        CmpLess()) - vector.data();
    VmaVectorInsert(vector, indexToInsert, value);
    return indexToInsert;
}
    4258 
// Removes the first element equivalent to (value) from a sorted vector,
// using binary search. Returns true if an element was found and removed.
template<typename CmpLess, typename VectorT>
bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
        vector.begin(),
        vector.end(),
        value,
        comparator);
    // Found position holds an equivalent element only if neither orders before the other.
    if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    {
        size_t indexToRemove = it - vector.begin();
        VmaVectorRemove(vector, indexToRemove);
        return true;
    }
    return false;
}
    4276 
    4278 // class VmaPoolAllocator
    4279 
/*
Allocator for objects of type T using a list of arrays (pools) to speed up
allocation. Number of elements that can be allocated is not bounded because
allocator can create multiple blocks.
*/
template<typename T>
class VmaPoolAllocator
{
    VMA_CLASS_NO_COPY(VmaPoolAllocator)
public:
    // firstBlockCapacity: element count of the first block; subsequent
    // blocks grow by factor 1.5 (see CreateNewBlock). Must be > 1.
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity);
    ~VmaPoolAllocator();
    // Frees all blocks. Outstanding pointers returned by Alloc() become invalid.
    void Clear();
    // Returns storage for one T from a block's free list.
    T* Alloc();
    // Returns (ptr) obtained from Alloc() to its block's free list.
    void Free(T* ptr);

private:
    // Each slot is either live (Value) or a free-list node (NextFreeIndex).
    union Item
    {
        uint32_t NextFreeIndex;
        T Value;
    };

    struct ItemBlock
    {
        Item* pItems;
        uint32_t Capacity;
        uint32_t FirstFreeIndex; // Head of this block's free list; UINT32_MAX = full.
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    const uint32_t m_FirstBlockCapacity;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};
    4316 
// No blocks are allocated up front; the first block is created lazily
// by the first Alloc().
template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_FirstBlockCapacity(firstBlockCapacity),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(m_FirstBlockCapacity > 1);
}
    4325 
// Releases all blocks (does not require all items to have been freed).
template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    Clear();
}
    4331 
// Frees every block's item array (in reverse order) and empties the block list.
template<typename T>
void VmaPoolAllocator<T>::Clear()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
        vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity);
    m_ItemBlocks.clear();
}
    4339 
// Returns uninitialized storage for one T.
// Searches blocks newest-to-oldest for one with a free slot and pops the
// head of its free list; creates a new (larger) block when all are full.
template<typename T>
T* VmaPoolAllocator<T>::Alloc()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];
        // This block has some free items: Use first one.
        if(block.FirstFreeIndex != UINT32_MAX)
        {
            Item* const pItem = &block.pItems[block.FirstFreeIndex];
            block.FirstFreeIndex = pItem->NextFreeIndex; // Pop free-list head.
            return &pItem->Value;
        }
    }

    // No block has free item: Create new one and use it.
    ItemBlock& newBlock = CreateNewBlock();
    Item* const pItem = &newBlock.pItems[0];
    newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    return &pItem->Value;
}
    4361 
// Returns (ptr), previously obtained from Alloc(), to its owning block's
// free list. Linear search over blocks (newest first) to find the owner;
// asserts if the pointer doesn't belong to any block.
template<typename T>
void VmaPoolAllocator<T>::Free(T* ptr)
{
    // Search all memory blocks to find ptr.
    for(size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];

        // Casting to union.
        // memcpy instead of reinterpret_cast keeps the T* -> Item* conversion
        // free of aliasing concerns.
        Item* pItemPtr;
        memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));

        // Check if pItemPtr is in address range of this block.
        if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity))
        {
            // Push the slot onto this block's free list.
            const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
            pItemPtr->NextFreeIndex = block.FirstFreeIndex;
            block.FirstFreeIndex = index;
            return;
        }
    }
    VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
}
    4385 
// Appends a new block. The first block uses m_FirstBlockCapacity; each
// subsequent block is 1.5x the capacity of the previous one.
template<typename T>
typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
{
    const uint32_t newBlockCapacity = m_ItemBlocks.empty() ?
        m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2;

    const ItemBlock newBlock = {
        vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity),
        newBlockCapacity,
        0 };

    m_ItemBlocks.push_back(newBlock);

    // Setup singly-linked list of all free items in this block.
    // (newBlock is a local copy, but its pItems points at the same array
    // as the element just pushed, so writing through it is correct.)
    for(uint32_t i = 0; i < newBlockCapacity - 1; ++i)
        newBlock.pItems[i].NextFreeIndex = i + 1;
    newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX; // End of free list.
    return m_ItemBlocks.back();
}
    4405 
    4407 // class VmaRawList, VmaList
    4408 
    4409 #if VMA_USE_STL_LIST
    4410 
    4411 #define VmaList std::list
    4412 
    4413 #else // #if VMA_USE_STL_LIST
    4414 
// Node of VmaRawList: intrusive prev/next links plus the stored value.
// Nodes are allocated from the list's VmaPoolAllocator.
template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev; // VMA_NULL for the front node.
    VmaListItem* pNext; // VMA_NULL for the back node.
    T Value;
};
    4422 
    4423 // Doubly linked list.
// Doubly linked list.
// "Raw" because it exposes nodes (ItemType*) directly rather than iterators;
// VmaList below wraps it with an STL-like iterator interface.
template<typename T>
class VmaRawList
{
    VMA_CLASS_NO_COPY(VmaRawList)
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    // Frees all nodes and resets the list to empty.
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    // Front()/Back() return VMA_NULL when the list is empty.
    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    // Push* without a value leave Value default-initialized for the caller to fill.
    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    // Unlinks and frees pItem. pItem must belong to this list.
    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    // Pool allocator all nodes are drawn from; owns their memory.
    VmaPoolAllocator<ItemType> m_ItemAllocator;
    ItemType* m_pFront; // VMA_NULL when empty.
    ItemType* m_pBack;  // VMA_NULL when empty.
    size_t m_Count;
};
    4467 
// Constructs an empty list. Every list owns its node pool allocator,
// configured with a first-block capacity of 128 items.
template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}
    4477 
template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would be unnecessary
    // computations to return all items to m_ItemAllocator as free.
    // Node memory is presumably reclaimed wholesale when m_ItemAllocator is
    // destroyed — see VmaPoolAllocator's destructor/Clear.
}
    4484 
    4485 template<typename T>
    4486 void VmaRawList<T>::Clear()
    4487 {
    4488  if(IsEmpty() == false)
    4489  {
    4490  ItemType* pItem = m_pBack;
    4491  while(pItem != VMA_NULL)
    4492  {
    4493  ItemType* const pPrevItem = pItem->pPrev;
    4494  m_ItemAllocator.Free(pItem);
    4495  pItem = pPrevItem;
    4496  }
    4497  m_pFront = VMA_NULL;
    4498  m_pBack = VMA_NULL;
    4499  m_Count = 0;
    4500  }
    4501 }
    4502 
    4503 template<typename T>
    4504 VmaListItem<T>* VmaRawList<T>::PushBack()
    4505 {
    4506  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    4507  pNewItem->pNext = VMA_NULL;
    4508  if(IsEmpty())
    4509  {
    4510  pNewItem->pPrev = VMA_NULL;
    4511  m_pFront = pNewItem;
    4512  m_pBack = pNewItem;
    4513  m_Count = 1;
    4514  }
    4515  else
    4516  {
    4517  pNewItem->pPrev = m_pBack;
    4518  m_pBack->pNext = pNewItem;
    4519  m_pBack = pNewItem;
    4520  ++m_Count;
    4521  }
    4522  return pNewItem;
    4523 }
    4524 
    4525 template<typename T>
    4526 VmaListItem<T>* VmaRawList<T>::PushFront()
    4527 {
    4528  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    4529  pNewItem->pPrev = VMA_NULL;
    4530  if(IsEmpty())
    4531  {
    4532  pNewItem->pNext = VMA_NULL;
    4533  m_pFront = pNewItem;
    4534  m_pBack = pNewItem;
    4535  m_Count = 1;
    4536  }
    4537  else
    4538  {
    4539  pNewItem->pNext = m_pFront;
    4540  m_pFront->pPrev = pNewItem;
    4541  m_pFront = pNewItem;
    4542  ++m_Count;
    4543  }
    4544  return pNewItem;
    4545 }
    4546 
    4547 template<typename T>
    4548 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
    4549 {
    4550  ItemType* const pNewItem = PushBack();
    4551  pNewItem->Value = value;
    4552  return pNewItem;
    4553 }
    4554 
    4555 template<typename T>
    4556 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
    4557 {
    4558  ItemType* const pNewItem = PushFront();
    4559  pNewItem->Value = value;
    4560  return pNewItem;
    4561 }
    4562 
    4563 template<typename T>
    4564 void VmaRawList<T>::PopBack()
    4565 {
    4566  VMA_HEAVY_ASSERT(m_Count > 0);
    4567  ItemType* const pBackItem = m_pBack;
    4568  ItemType* const pPrevItem = pBackItem->pPrev;
    4569  if(pPrevItem != VMA_NULL)
    4570  {
    4571  pPrevItem->pNext = VMA_NULL;
    4572  }
    4573  m_pBack = pPrevItem;
    4574  m_ItemAllocator.Free(pBackItem);
    4575  --m_Count;
    4576 }
    4577 
    4578 template<typename T>
    4579 void VmaRawList<T>::PopFront()
    4580 {
    4581  VMA_HEAVY_ASSERT(m_Count > 0);
    4582  ItemType* const pFrontItem = m_pFront;
    4583  ItemType* const pNextItem = pFrontItem->pNext;
    4584  if(pNextItem != VMA_NULL)
    4585  {
    4586  pNextItem->pPrev = VMA_NULL;
    4587  }
    4588  m_pFront = pNextItem;
    4589  m_ItemAllocator.Free(pFrontItem);
    4590  --m_Count;
    4591 }
    4592 
    4593 template<typename T>
    4594 void VmaRawList<T>::Remove(ItemType* pItem)
    4595 {
    4596  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    4597  VMA_HEAVY_ASSERT(m_Count > 0);
    4598 
    4599  if(pItem->pPrev != VMA_NULL)
    4600  {
    4601  pItem->pPrev->pNext = pItem->pNext;
    4602  }
    4603  else
    4604  {
    4605  VMA_HEAVY_ASSERT(m_pFront == pItem);
    4606  m_pFront = pItem->pNext;
    4607  }
    4608 
    4609  if(pItem->pNext != VMA_NULL)
    4610  {
    4611  pItem->pNext->pPrev = pItem->pPrev;
    4612  }
    4613  else
    4614  {
    4615  VMA_HEAVY_ASSERT(m_pBack == pItem);
    4616  m_pBack = pItem->pPrev;
    4617  }
    4618 
    4619  m_ItemAllocator.Free(pItem);
    4620  --m_Count;
    4621 }
    4622 
    4623 template<typename T>
    4624 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
    4625 {
    4626  if(pItem != VMA_NULL)
    4627  {
    4628  ItemType* const prevItem = pItem->pPrev;
    4629  ItemType* const newItem = m_ItemAllocator.Alloc();
    4630  newItem->pPrev = prevItem;
    4631  newItem->pNext = pItem;
    4632  pItem->pPrev = newItem;
    4633  if(prevItem != VMA_NULL)
    4634  {
    4635  prevItem->pNext = newItem;
    4636  }
    4637  else
    4638  {
    4639  VMA_HEAVY_ASSERT(m_pFront == pItem);
    4640  m_pFront = newItem;
    4641  }
    4642  ++m_Count;
    4643  return newItem;
    4644  }
    4645  else
    4646  return PushBack();
    4647 }
    4648 
    4649 template<typename T>
    4650 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
    4651 {
    4652  if(pItem != VMA_NULL)
    4653  {
    4654  ItemType* const nextItem = pItem->pNext;
    4655  ItemType* const newItem = m_ItemAllocator.Alloc();
    4656  newItem->pNext = nextItem;
    4657  newItem->pPrev = pItem;
    4658  pItem->pNext = newItem;
    4659  if(nextItem != VMA_NULL)
    4660  {
    4661  nextItem->pPrev = newItem;
    4662  }
    4663  else
    4664  {
    4665  VMA_HEAVY_ASSERT(m_pBack == pItem);
    4666  m_pBack = newItem;
    4667  }
    4668  ++m_Count;
    4669  return newItem;
    4670  }
    4671  else
    4672  return PushFront();
    4673 }
    4674 
    4675 template<typename T>
    4676 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
    4677 {
    4678  ItemType* const newItem = InsertBefore(pItem);
    4679  newItem->Value = value;
    4680  return newItem;
    4681 }
    4682 
    4683 template<typename T>
    4684 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
    4685 {
    4686  ItemType* const newItem = InsertAfter(pItem);
    4687  newItem->Value = value;
    4688  return newItem;
    4689 }
    4690 
    4691 template<typename T, typename AllocatorT>
    4692 class VmaList
    4693 {
    4694  VMA_CLASS_NO_COPY(VmaList)
    4695 public:
    4696  class iterator
    4697  {
    4698  public:
    4699  iterator() :
    4700  m_pList(VMA_NULL),
    4701  m_pItem(VMA_NULL)
    4702  {
    4703  }
    4704 
    4705  T& operator*() const
    4706  {
    4707  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    4708  return m_pItem->Value;
    4709  }
    4710  T* operator->() const
    4711  {
    4712  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    4713  return &m_pItem->Value;
    4714  }
    4715 
    4716  iterator& operator++()
    4717  {
    4718  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    4719  m_pItem = m_pItem->pNext;
    4720  return *this;
    4721  }
    4722  iterator& operator--()
    4723  {
    4724  if(m_pItem != VMA_NULL)
    4725  {
    4726  m_pItem = m_pItem->pPrev;
    4727  }
    4728  else
    4729  {
    4730  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
    4731  m_pItem = m_pList->Back();
    4732  }
    4733  return *this;
    4734  }
    4735 
    4736  iterator operator++(int)
    4737  {
    4738  iterator result = *this;
    4739  ++*this;
    4740  return result;
    4741  }
    4742  iterator operator--(int)
    4743  {
    4744  iterator result = *this;
    4745  --*this;
    4746  return result;
    4747  }
    4748 
    4749  bool operator==(const iterator& rhs) const
    4750  {
    4751  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    4752  return m_pItem == rhs.m_pItem;
    4753  }
    4754  bool operator!=(const iterator& rhs) const
    4755  {
    4756  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    4757  return m_pItem != rhs.m_pItem;
    4758  }
    4759 
    4760  private:
    4761  VmaRawList<T>* m_pList;
    4762  VmaListItem<T>* m_pItem;
    4763 
    4764  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
    4765  m_pList(pList),
    4766  m_pItem(pItem)
    4767  {
    4768  }
    4769 
    4770  friend class VmaList<T, AllocatorT>;
    4771  };
    4772 
    4773  class const_iterator
    4774  {
    4775  public:
    4776  const_iterator() :
    4777  m_pList(VMA_NULL),
    4778  m_pItem(VMA_NULL)
    4779  {
    4780  }
    4781 
    4782  const_iterator(const iterator& src) :
    4783  m_pList(src.m_pList),
    4784  m_pItem(src.m_pItem)
    4785  {
    4786  }
    4787 
    4788  const T& operator*() const
    4789  {
    4790  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    4791  return m_pItem->Value;
    4792  }
    4793  const T* operator->() const
    4794  {
    4795  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    4796  return &m_pItem->Value;
    4797  }
    4798 
    4799  const_iterator& operator++()
    4800  {
    4801  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    4802  m_pItem = m_pItem->pNext;
    4803  return *this;
    4804  }
    4805  const_iterator& operator--()
    4806  {
    4807  if(m_pItem != VMA_NULL)
    4808  {
    4809  m_pItem = m_pItem->pPrev;
    4810  }
    4811  else
    4812  {
    4813  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
    4814  m_pItem = m_pList->Back();
    4815  }
    4816  return *this;
    4817  }
    4818 
    4819  const_iterator operator++(int)
    4820  {
    4821  const_iterator result = *this;
    4822  ++*this;
    4823  return result;
    4824  }
    4825  const_iterator operator--(int)
    4826  {
    4827  const_iterator result = *this;
    4828  --*this;
    4829  return result;
    4830  }
    4831 
    4832  bool operator==(const const_iterator& rhs) const
    4833  {
    4834  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    4835  return m_pItem == rhs.m_pItem;
    4836  }
    4837  bool operator!=(const const_iterator& rhs) const
    4838  {
    4839  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    4840  return m_pItem != rhs.m_pItem;
    4841  }
    4842 
    4843  private:
    4844  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
    4845  m_pList(pList),
    4846  m_pItem(pItem)
    4847  {
    4848  }
    4849 
    4850  const VmaRawList<T>* m_pList;
    4851  const VmaListItem<T>* m_pItem;
    4852 
    4853  friend class VmaList<T, AllocatorT>;
    4854  };
    4855 
    4856  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
    4857 
    4858  bool empty() const { return m_RawList.IsEmpty(); }
    4859  size_t size() const { return m_RawList.GetCount(); }
    4860 
    4861  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    4862  iterator end() { return iterator(&m_RawList, VMA_NULL); }
    4863 
    4864  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    4865  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
    4866 
    4867  void clear() { m_RawList.Clear(); }
    4868  void push_back(const T& value) { m_RawList.PushBack(value); }
    4869  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    4870  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
    4871 
    4872 private:
    4873  VmaRawList<T> m_RawList;
    4874 };
    4875 
    4876 #endif // #if VMA_USE_STL_LIST
    4877 
    4879 // class VmaMap
    4880 
    4881 // Unused in this version.
    4882 #if 0
    4883 
    4884 #if VMA_USE_STL_UNORDERED_MAP
    4885 
    4886 #define VmaPair std::pair
    4887 
    4888 #define VMA_MAP_TYPE(KeyT, ValueT) \
    4889  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
    4890 
    4891 #else // #if VMA_USE_STL_UNORDERED_MAP
    4892 
// Minimal stand-in for std::pair used by VmaMap below.
// (Dead code: this whole section is compiled out by the surrounding #if 0.)
template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};
    4902 
/* Class compatible with subset of interface of std::unordered_map.
KeyT, ValueT must be POD because they will be stored in VmaVector.

Implemented as a VmaVector of pairs kept sorted by key (see insert/find below),
not as a hash table. (Dead code: compiled out by the surrounding #if 0.)
*/
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    // Iterators are raw pointers into the underlying vector; they are
    // invalidated by insert/erase.
    typedef PairType* iterator;

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    void insert(const PairType& pair);
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    // Pairs stored sorted by .first (key order).
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};
    4925 
    4926 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
    4927 
// Orders VmaPairs by their first member (the key). The second overload allows
// comparing a pair directly against a bare key in binary searches.
template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};
    4940 
// Inserts pair at the position that keeps m_Vector sorted by key.
// Note: does not check for an existing equal key — duplicates are possible.
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
{
    // Binary-search for the first element whose key is >= pair's key.
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        pair,
        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    VmaVectorInsert(m_Vector, indexToInsert, pair);
}
    4951 
// Binary-searches for key in the sorted vector.
// Returns an iterator to the matching pair, or end() if not found.
template<typename KeyT, typename ValueT>
VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
{
    PairType* it = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        key,
        VmaPairFirstLess<KeyT, ValueT>());
    // The lower bound may point past the end or at a larger key; verify match.
    if((it != m_Vector.end()) && (it->first == key))
    {
        return it;
    }
    else
    {
        return m_Vector.end();
    }
}
    4969 
// Removes the element at it. Invalidates iterators at and after it.
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}
    4975 
    4976 #endif // #if VMA_USE_STL_UNORDERED_MAP
    4977 
    4978 #endif // #if 0
    4979 
    4981 
// Forward declaration; the full definition appears later in this file.
class VmaDeviceMemoryBlock;

// Direction of a cache maintenance operation on mapped memory:
// flush (host writes -> device) or invalidate (device writes -> host).
enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
    4985 
/*
Internal state behind the public VmaAllocation handle.

Initialization and teardown are done via the explicit Ctor()/Dtor() methods,
not C++ constructors (see the POD note inside the class body).
*/
struct VmaAllocation_T
{
private:
    // Bit 0x80 of m_MapCount: allocation was created persistently mapped.
    static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;

    enum FLAGS
    {
        // Set when m_pUserData holds an owned string copy rather than an
        // opaque caller pointer.
        FLAG_USER_DATA_STRING = 0x01,
    };

public:
    enum ALLOCATION_TYPE
    {
        ALLOCATION_TYPE_NONE,       // Not initialized yet (state right after Ctor()).
        ALLOCATION_TYPE_BLOCK,      // Suballocation within a VmaDeviceMemoryBlock.
        ALLOCATION_TYPE_DEDICATED,  // Owns its own VkDeviceMemory.
    };

    /*
    This struct cannot have constructor or destructor. It must be POD because it is
    allocated using VmaPoolAllocator.
    */

    // Poor-man's constructor: resets every member to the "none" state.
    void Ctor(uint32_t currentFrameIndex, bool userDataString)
    {
        m_Alignment = 1;
        m_Size = 0;
        m_pUserData = VMA_NULL;
        m_LastUseFrameIndex = currentFrameIndex;
        m_Type = (uint8_t)ALLOCATION_TYPE_NONE;
        m_SuballocationType = (uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN;
        m_MapCount = 0;
        m_Flags = userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0;

#if VMA_STATS_STRING_ENABLED
        m_CreationFrameIndex = currentFrameIndex;
        m_BufferImageUsage = 0;
#endif
    }

    // Poor-man's destructor: only asserts that cleanup already happened.
    void Dtor()
    {
        // All non-persistent map references must have been released.
        VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");

        // Check if owned string was freed.
        VMA_ASSERT(m_pUserData == VMA_NULL);
    }

    // Initializes this allocation as a suballocation of `block` at `offset`.
    // Must only be called on a freshly Ctor()-ed object.
    void InitBlockAllocation(
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset,
        VkDeviceSize alignment,
        VkDeviceSize size,
        VmaSuballocationType suballocationType,
        bool mapped,
        bool canBecomeLost)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(block != VMA_NULL);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_Alignment = alignment;
        m_Size = size;
        m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_SuballocationType = (uint8_t)suballocationType;
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
        m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    }

    // Initializes this allocation as already lost: block-type with a null
    // block. Caller must have set m_LastUseFrameIndex to VMA_FRAME_INDEX_LOST.
    void InitLost()
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_BlockAllocation.m_Block = VMA_NULL;
        m_BlockAllocation.m_Offset = 0;
        m_BlockAllocation.m_CanBecomeLost = true;
    }

    void ChangeBlockAllocation(
        VmaAllocator hAllocator,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset);

    void ChangeSize(VkDeviceSize newSize);
    void ChangeOffset(VkDeviceSize newOffset);

    // pMappedData not null means allocation is created with MAPPED flag.
    // Must only be called on a freshly Ctor()-ed object.
    void InitDedicatedAllocation(
        uint32_t memoryTypeIndex,
        VkDeviceMemory hMemory,
        VmaSuballocationType suballocationType,
        void* pMappedData,
        VkDeviceSize size)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(hMemory != VK_NULL_HANDLE);
        m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
        m_Alignment = 0;
        m_Size = size;
        m_SuballocationType = (uint8_t)suballocationType;
        m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
        m_DedicatedAllocation.m_hMemory = hMemory;
        m_DedicatedAllocation.m_pMappedData = pMappedData;
    }

    ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    VkDeviceSize GetAlignment() const { return m_Alignment; }
    VkDeviceSize GetSize() const { return m_Size; }
    bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
    void* GetUserData() const { return m_pUserData; }
    void SetUserData(VmaAllocator hAllocator, void* pUserData);
    VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }

    // Valid only for ALLOCATION_TYPE_BLOCK.
    VmaDeviceMemoryBlock* GetBlock() const
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        return m_BlockAllocation.m_Block;
    }
    VkDeviceSize GetOffset() const;
    VkDeviceMemory GetMemory() const;
    uint32_t GetMemoryTypeIndex() const;
    bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
    void* GetMappedData() const;
    bool CanBecomeLost() const;

    uint32_t GetLastUseFrameIndex() const
    {
        return m_LastUseFrameIndex.load();
    }
    // Atomic CAS on the last-use frame index (weak: may fail spuriously,
    // callers retry in a loop).
    bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    {
        return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    }
    /*
    - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
      makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    - Else, returns false.

    If hAllocation is already lost, assert - you should not call it then.
    If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    */
    bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Fills outInfo with statistics describing this single dedicated
    // allocation: one block, one allocation, no unused ranges.
    void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
        outInfo.blockCount = 1;
        outInfo.allocationCount = 1;
        outInfo.unusedRangeCount = 0;
        outInfo.usedBytes = m_Size;
        outInfo.unusedBytes = 0;
        outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
        outInfo.unusedRangeSizeMin = UINT64_MAX;
        outInfo.unusedRangeSizeMax = 0;
    }

    void BlockAllocMap();
    void BlockAllocUnmap();
    VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    void DedicatedAllocUnmap(VmaAllocator hAllocator);

#if VMA_STATS_STRING_ENABLED
    uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
    uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }

    // Records the VkBufferUsageFlags/VkImageUsageFlags of the bound resource;
    // may be set only once after Ctor().
    void InitBufferImageUsage(uint32_t bufferImageUsage)
    {
        VMA_ASSERT(m_BufferImageUsage == 0);
        m_BufferImageUsage = bufferImageUsage;
    }

    void PrintParameters(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Alignment;
    VkDeviceSize m_Size;
    void* m_pUserData;
    VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    uint8_t m_Type; // ALLOCATION_TYPE
    uint8_t m_SuballocationType; // VmaSuballocationType
    // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
    uint8_t m_MapCount;
    uint8_t m_Flags; // enum FLAGS

    // Allocation out of VmaDeviceMemoryBlock.
    struct BlockAllocation
    {
        VmaDeviceMemoryBlock* m_Block;
        VkDeviceSize m_Offset;
        bool m_CanBecomeLost;
    };

    // Allocation for an object that has its own private VkDeviceMemory.
    struct DedicatedAllocation
    {
        uint32_t m_MemoryTypeIndex;
        VkDeviceMemory m_hMemory;
        void* m_pMappedData; // Not null means memory is mapped.
    };

    // Active member is selected by m_Type (BLOCK vs DEDICATED).
    union
    {
        // Allocation out of VmaDeviceMemoryBlock.
        BlockAllocation m_BlockAllocation;
        // Allocation for an object that has its own private VkDeviceMemory.
        DedicatedAllocation m_DedicatedAllocation;
    };

#if VMA_STATS_STRING_ENABLED
    uint32_t m_CreationFrameIndex;
    uint32_t m_BufferImageUsage; // 0 if unknown.
#endif

    void FreeUserDataString(VmaAllocator hAllocator);
};
    5205 
/*
Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
allocated memory block or free.
*/
struct VmaSuballocation
{
    VkDeviceSize offset; // Byte offset of the region within the block.
    VkDeviceSize size;   // Byte size of the region.
    VmaAllocation hAllocation; // Owning allocation; presumably VMA_NULL for free regions — confirm against usage below.
    VmaSuballocationType type; // Kind of region (free vs. resource type).
};
    5217 
// Comparator for offsets.
// Strict-weak ordering of suballocations by ascending offset.
struct VmaSuballocationOffsetLess
{
    bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    {
        return lhs.offset < rhs.offset;
    }
};
// Strict-weak ordering of suballocations by descending offset.
struct VmaSuballocationOffsetGreater
{
    bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    {
        return lhs.offset > rhs.offset;
    }
};
    5233 
// Doubly-linked list of suballocations belonging to one device memory block.
typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;

// Cost of one additional allocation lost, as equivalent in bytes.
static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
    5238 
// Distinguishes how an allocation request's target position was chosen;
// consumed by the metadata Alloc() implementations.
enum class VmaAllocationRequestType
{
    Normal,
    // Used by "Linear" algorithm.
    UpperAddress,
    EndOf1st,
    EndOf2nd,
};
    5247 
/*
Parameters of planned allocation inside a VmaDeviceMemoryBlock.

If canMakeOtherLost was false:
- item points to a FREE suballocation.
- itemsToMakeLostCount is 0.

If canMakeOtherLost was true:
- item points to first of sequence of suballocations, which are either FREE,
  or point to VmaAllocations that can become lost.
- itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
  the requested allocation to succeed.
*/
struct VmaAllocationRequest
{
    VkDeviceSize offset; // Where the new allocation would start inside the block.
    VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
    VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
    VmaSuballocationList::iterator item;
    size_t itemsToMakeLostCount;
    void* customData; // Algorithm-specific payload; semantics depend on the metadata implementation.
    VmaAllocationRequestType type;

    // Heuristic cost of choosing this request: bytes sacrificed plus a fixed
    // per-lost-allocation penalty (VMA_LOST_ALLOCATION_COST). Lower is better.
    VkDeviceSize CalcCost() const
    {
        return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    }
};
    5276 
/*
Data structure used for bookkeeping of allocations and unused ranges of memory
in a single VkDeviceMemory block.

Abstract base class: concrete allocation algorithms (e.g. the "Generic" one
below) implement the pure-virtual interface.
*/
class VmaBlockMetadata
{
public:
    VmaBlockMetadata(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata() { }
    // Called once after construction with the managed block's size.
    virtual void Init(VkDeviceSize size) { m_Size = size; }

    // Validates all data structures inside this object. If not valid, returns false.
    virtual bool Validate() const = 0;
    VkDeviceSize GetSize() const { return m_Size; }
    virtual size_t GetAllocationCount() const = 0;
    virtual VkDeviceSize GetSumFreeSize() const = 0;
    virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
    // Returns true if this block is empty - contains only single free suballocation.
    virtual bool IsEmpty() const = 0;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
    // Shouldn't modify blockCount.
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
#endif

    // Tries to find a place for suballocation with given parameters inside this block.
    // If succeeded, fills pAllocationRequest and returns true.
    // If failed, returns false.
    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest) = 0;

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest) = 0;

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;

    virtual VkResult CheckCorruption(const void* pBlockData) = 0;

    // Makes actual allocation based on request. Request must already be checked and valid.
    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        VmaAllocation hAllocation) = 0;

    // Frees suballocation assigned to given memory region.
    virtual void Free(const VmaAllocation allocation) = 0;
    virtual void FreeAtOffset(VkDeviceSize offset) = 0;

    // Tries to resize (grow or shrink) space for given allocation, in place.
    // Default implementation: resizing unsupported.
    virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize) { return false; }

protected:
    const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }

#if VMA_STATS_STRING_ENABLED
    // Helpers for implementing PrintDetailedMap() in derived classes.
    void PrintDetailedMap_Begin(class VmaJsonWriter& json,
        VkDeviceSize unusedBytes,
        size_t allocationCount,
        size_t unusedRangeCount) const;
    void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VmaAllocation hAllocation) const;
    void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VkDeviceSize size) const;
    void PrintDetailedMap_End(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Size;
    const VkAllocationCallbacks* m_pAllocationCallbacks;
};
    5365 
// Helper for the Validate() methods below: evaluates `cond`; if it is false,
// fires VMA_ASSERT with the stringized condition and makes the enclosing
// function return false. Wrapped in do/while(false) so it behaves like a
// single statement (safe inside unbraced if/else).
#define VMA_VALIDATE(cond) do { if(!(cond)) { \
        VMA_ASSERT(0 && "Validation failed: " #cond); \
        return false; \
    } } while(false)
    5370 
/*
Default, general-purpose block metadata. Keeps every suballocation (used and
free) in m_Suballocations, plus a separate vector of large-enough free
suballocations sorted by size (m_FreeSuballocationsBySize) to speed up
searching for a fit.
*/
class VmaBlockMetadata_Generic : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
public:
    VmaBlockMetadata_Generic(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Generic();
    // Always call after construction, before any other use.
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    // Used allocations = all suballocations minus the free ones.
    virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

    virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize);

    // For defragmentation

    bool IsBufferImageGranularityConflictPossible(
        VkDeviceSize bufferImageGranularity,
        VmaSuballocationType& inOutPrevSuballocType) const;

private:
    friend class VmaDefragmentationAlgorithm_Generic;
    friend class VmaDefragmentationAlgorithm_Fast;

    // Number of free suballocations in m_Suballocations.
    uint32_t m_FreeCount;
    // Sum of sizes of all free suballocations.
    VkDeviceSize m_SumFreeSize;
    VmaSuballocationList m_Suballocations;
    // Suballocations that are free and have size greater than certain threshold.
    // Sorted by size, ascending.
    VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;

    bool ValidateFreeSuballocationList() const;

    // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
    // If yes, fills pOffset and returns true. If no, returns false.
    bool CheckAllocation(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        VmaSuballocationList::const_iterator suballocItem,
        bool canMakeOtherLost,
        VkDeviceSize* pOffset,
        size_t* itemsToMakeLostCount,
        VkDeviceSize* pSumFreeSize,
        VkDeviceSize* pSumItemSize) const;
    // Given free suballocation, it merges it with following one, which must also be free.
    void MergeFreeWithNext(VmaSuballocationList::iterator item);
    // Releases given suballocation, making it free.
    // Merges it with adjacent free suballocations if applicable.
    // Returns iterator to new free suballocation at this place.
    VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    // Given free suballocation, it inserts it into sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    // Given free suballocation, it removes it from sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
};
    5472 
    5473 /*
    5474 Allocations and their references in internal data structure look like this:
    5475 
    5476 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
    5477 
    5478  0 +-------+
    5479  | |
    5480  | |
    5481  | |
    5482  +-------+
    5483  | Alloc | 1st[m_1stNullItemsBeginCount]
    5484  +-------+
    5485  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    5486  +-------+
    5487  | ... |
    5488  +-------+
    5489  | Alloc | 1st[1st.size() - 1]
    5490  +-------+
    5491  | |
    5492  | |
    5493  | |
    5494 GetSize() +-------+
    5495 
    5496 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
    5497 
    5498  0 +-------+
    5499  | Alloc | 2nd[0]
    5500  +-------+
    5501  | Alloc | 2nd[1]
    5502  +-------+
    5503  | ... |
    5504  +-------+
    5505  | Alloc | 2nd[2nd.size() - 1]
    5506  +-------+
    5507  | |
    5508  | |
    5509  | |
    5510  +-------+
    5511  | Alloc | 1st[m_1stNullItemsBeginCount]
    5512  +-------+
    5513  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    5514  +-------+
    5515  | ... |
    5516  +-------+
    5517  | Alloc | 1st[1st.size() - 1]
    5518  +-------+
    5519  | |
    5520 GetSize() +-------+
    5521 
    5522 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
    5523 
    5524  0 +-------+
    5525  | |
    5526  | |
    5527  | |
    5528  +-------+
    5529  | Alloc | 1st[m_1stNullItemsBeginCount]
    5530  +-------+
    5531  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    5532  +-------+
    5533  | ... |
    5534  +-------+
    5535  | Alloc | 1st[1st.size() - 1]
    5536  +-------+
    5537  | |
    5538  | |
    5539  | |
    5540  +-------+
    5541  | Alloc | 2nd[2nd.size() - 1]
    5542  +-------+
    5543  | ... |
    5544  +-------+
    5545  | Alloc | 2nd[1]
    5546  +-------+
    5547  | Alloc | 2nd[0]
    5548 GetSize() +-------+
    5549 
    5550 */
/*
Block metadata implementing the linear allocation algorithm: suballocations
are held in two vectors used in ping-pong fashion, supporting ring-buffer and
double-stack modes. See the ASCII diagrams above for the memory layout in each
SECOND_VECTOR_MODE.
*/
class VmaBlockMetadata_Linear : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
public:
    VmaBlockMetadata_Linear(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Linear();
    // Always call after construction, before any other use.
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const;
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const { return GetAllocationCount() == 0; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

private:
    /*
    There are two suballocation vectors, used in ping-pong way.
    The one with index m_1stVectorIndex is called 1st.
    The one with index (m_1stVectorIndex ^ 1) is called 2nd.
    2nd can be non-empty only when 1st is not empty.
    When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
    */
    typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;

    enum SECOND_VECTOR_MODE
    {
        SECOND_VECTOR_EMPTY,
        /*
        Suballocations in 2nd vector are created later than the ones in 1st, but they
        all have smaller offset.
        */
        SECOND_VECTOR_RING_BUFFER,
        /*
        Suballocations in 2nd vector are upper side of double stack.
        They all have offsets higher than those in 1st vector.
        Top of this stack means smaller offsets, but higher indices in this vector.
        */
        SECOND_VECTOR_DOUBLE_STACK,
    };

    VkDeviceSize m_SumFreeSize;
    // Physical storage; which one plays the role of "1st" is selected by m_1stVectorIndex.
    SuballocationVectorType m_Suballocations0, m_Suballocations1;
    uint32_t m_1stVectorIndex;
    SECOND_VECTOR_MODE m_2ndVectorMode;

    SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
    const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }

    // Number of items in 1st vector with hAllocation = null at the beginning.
    size_t m_1stNullItemsBeginCount;
    // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
    size_t m_1stNullItemsMiddleCount;
    // Number of items in 2nd vector with hAllocation = null.
    size_t m_2ndNullItemsCount;

    // Heuristic deciding whether the 1st vector is worth compacting (removing null items).
    bool ShouldCompact1st() const;
    // Housekeeping after Free/FreeAtOffset - presumably compacts vectors and updates counters; verify in implementation.
    void CleanupAfterFree();

    // Helper for CreateAllocationRequest when upperAddress == false.
    bool CreateAllocationRequest_LowerAddress(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);
    // Helper for CreateAllocationRequest when upperAddress == true (upper side of double stack).
    bool CreateAllocationRequest_UpperAddress(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);
};
    5669 
    5670 /*
    5671 - GetSize() is the original size of allocated memory block.
    5672 - m_UsableSize is this size aligned down to a power of two.
    5673  All allocations and calculations happen relative to m_UsableSize.
    5674 - GetUnusableSize() is the difference between them.
    5675  It is reported as separate, unused range, not available for allocations.
    5676 
    5677 Node at level 0 has size = m_UsableSize.
    5678 Each next level contains nodes with size 2 times smaller than current level.
    5679 m_LevelCount is the maximum number of levels to use in the current object.
    5680 */
/*
Block metadata implementing the buddy allocator algorithm: a binary tree of
nodes where each node is either free, a single allocation, or split into two
children of half size. See the comment above for the role of m_UsableSize.
*/
class VmaBlockMetadata_Buddy : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
public:
    VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Buddy();
    // Always call after construction, before any other use.
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const { return m_AllocationCount; }
    // The unusable tail beyond m_UsableSize is counted as free here.
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    // Empty exactly when the root node is one big free range.
    virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Corruption detection is not supported by this algorithm.
    virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        VmaAllocation hAllocation);

    // Both public Free overloads funnel into the private FreeAtOffset(alloc, offset).
    virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
    virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }

private:
    static const VkDeviceSize MIN_NODE_SIZE = 32;
    static const size_t MAX_LEVELS = 30;

    // Accumulator used by ValidateNode() while walking the tree; results are
    // compared against the stored counters in Validate().
    struct ValidationContext
    {
        size_t calculatedAllocationCount;
        size_t calculatedFreeCount;
        VkDeviceSize calculatedSumFreeSize;

        ValidationContext() :
            calculatedAllocationCount(0),
            calculatedFreeCount(0),
            calculatedSumFreeSize(0) { }
    };

    // Node of the binary tree. Active union member depends on `type`.
    struct Node
    {
        VkDeviceSize offset;
        enum TYPE
        {
            TYPE_FREE,
            TYPE_ALLOCATION,
            TYPE_SPLIT,
            TYPE_COUNT
        } type;
        Node* parent;
        // The sibling node - the other half of the same parent.
        Node* buddy;

        union
        {
            // TYPE_FREE: intrusive links within m_FreeList at this node's level.
            struct
            {
                Node* prev;
                Node* next;
            } free;
            // TYPE_ALLOCATION: the allocation occupying this node.
            struct
            {
                VmaAllocation alloc;
            } allocation;
            // TYPE_SPLIT: only the left child is stored (right child is
            // presumably reachable as leftChild->buddy - verify in implementation).
            struct
            {
                Node* leftChild;
            } split;
        };
    };

    // Size of the memory block aligned down to a power of two.
    VkDeviceSize m_UsableSize;
    uint32_t m_LevelCount;

    Node* m_Root;
    // Per-level list of free nodes (front/back of an intrusive doubly-linked list).
    struct {
        Node* front;
        Node* back;
    } m_FreeList[MAX_LEVELS];
    // Number of nodes in the tree with type == TYPE_ALLOCATION.
    size_t m_AllocationCount;
    // Number of nodes in the tree with type == TYPE_FREE.
    size_t m_FreeCount;
    // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
    VkDeviceSize m_SumFreeSize;

    VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
    void DeleteNode(Node* node);
    bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
    uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
    // Node size halves with each level: level 0 spans the whole m_UsableSize.
    inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
    // Alloc passed just for validation. Can be null.
    void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
    void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
    // Adds node to the front of FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next can be undefined.
    void AddToFreeListFront(uint32_t level, Node* node);
    // Removes node from FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next stay untouched.
    void RemoveFromFreeList(uint32_t level, Node* node);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
#endif
};
    5816 
    5817 /*
    5818 Represents a single block of device memory (`VkDeviceMemory`) with all the
    5819 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
    5820 
    5821 Thread-safety: This class must be externally synchronized.
    5822 */
/*
Represents a single block of device memory (`VkDeviceMemory`) with all the
data about its regions (aka suballocations, #VmaAllocation), assigned and free.

Thread-safety: This class must be externally synchronized.
*/
class VmaDeviceMemoryBlock
{
    VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
public:
    // Metadata describing suballocations (used and free regions) inside this block.
    VmaBlockMetadata* m_pMetadata;

    VmaDeviceMemoryBlock(VmaAllocator hAllocator);

    // Destruction requires the block to be fully unmapped and its memory
    // already released via Destroy().
    ~VmaDeviceMemoryBlock()
    {
        VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
        VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    }

    // Always call after construction.
    void Init(
        VmaAllocator hAllocator,
        VmaPool hParentPool,
        uint32_t newMemoryTypeIndex,
        VkDeviceMemory newMemory,
        VkDeviceSize newSize,
        uint32_t id,
        uint32_t algorithm);
    // Always call before destruction.
    void Destroy(VmaAllocator allocator);

    VmaPool GetParentPool() const { return m_hParentPool; }
    VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    uint32_t GetId() const { return m_Id; }
    void* GetMappedData() const { return m_pMappedData; }

    // Validates all data structures inside this object. If not valid, returns false.
    bool Validate() const;

    VkResult CheckCorruption(VmaAllocator hAllocator);

    // Mapping is reference-counted via m_MapCount; `count` references are added/removed.
    // ppData can be null.
    VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
    void Unmap(VmaAllocator hAllocator, uint32_t count);

    VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
    VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);

    VkResult BindBufferMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkBuffer hBuffer);
    VkResult BindImageMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkImage hImage);

private:
    VmaPool m_hParentPool; // VK_NULL_HANDLE if not belongs to custom pool.
    uint32_t m_MemoryTypeIndex;
    uint32_t m_Id;
    VkDeviceMemory m_hMemory;

    /*
    Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
    Also protects m_MapCount, m_pMappedData.
    Allocations, deallocations, any change in m_pMetadata is protected by parent's VmaBlockVector::m_Mutex.
    */
    VMA_MUTEX m_Mutex;
    // Number of outstanding Map() references not yet balanced by Unmap().
    uint32_t m_MapCount;
    void* m_pMappedData;
};
    5891 
    5892 struct VmaPointerLess
    5893 {
    5894  bool operator()(const void* lhs, const void* rhs) const
    5895  {
    5896  return lhs < rhs;
    5897  }
    5898 };
    5899 
// One elementary move planned by a defragmentation algorithm: copy `size`
// bytes from offset `srcOffset` in block `srcBlockIndex` to offset
// `dstOffset` in block `dstBlockIndex`. Block indices presumably refer to
// blocks of the VmaBlockVector being defragmented - see its Defragment().
struct VmaDefragmentationMove
{
    size_t srcBlockIndex;
    size_t dstBlockIndex;
    VkDeviceSize srcOffset;
    VkDeviceSize dstOffset;
    VkDeviceSize size;
};
    5908 
    5909 class VmaDefragmentationAlgorithm;
    5910 
    5911 /*
    5912 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
    5913 Vulkan memory type.
    5914 
    5915 Synchronized internally with a mutex.
    5916 */
/*
Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
Vulkan memory type.

Synchronized internally with a mutex.
*/
struct VmaBlockVector
{
    VMA_CLASS_NO_COPY(VmaBlockVector)
public:
    VmaBlockVector(
        VmaAllocator hAllocator,
        VmaPool hParentPool,
        uint32_t memoryTypeIndex,
        VkDeviceSize preferredBlockSize,
        size_t minBlockCount,
        size_t maxBlockCount,
        VkDeviceSize bufferImageGranularity,
        uint32_t frameInUseCount,
        bool isCustomPool,
        bool explicitBlockSize,
        uint32_t algorithm);
    ~VmaBlockVector();

    // Presumably pre-creates blocks up to m_MinBlockCount - verify in implementation.
    VkResult CreateMinBlocks();

    VmaPool GetParentPool() const { return m_hParentPool; }
    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
    uint32_t GetAlgorithm() const { return m_Algorithm; }

    void GetPoolStats(VmaPoolStats* pStats);

    bool IsEmpty() const { return m_Blocks.empty(); }
    bool IsCorruptionDetectionEnabled() const;

    // Allocates `allocationCount` allocations at once, written to pAllocations.
    VkResult Allocate(
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        size_t allocationCount,
        VmaAllocation* pAllocations);

    void Free(
        VmaAllocation hAllocation);

    // Adds statistics of this BlockVector to pStats.
    void AddStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    void MakePoolAllocationsLost(
        uint32_t currentFrameIndex,
        size_t* pLostAllocationCount);
    VkResult CheckCorruption();

    // Saves results in pCtx->res.
    // The max*ToMove limits are taken by reference (in/out) - presumably
    // decremented by what this vector consumes; confirm in implementation.
    void Defragment(
        class VmaBlockVectorDefragmentationContext* pCtx,
        VmaDefragmentationStats* pStats,
        VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
        VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
        VkCommandBuffer commandBuffer);
    void DefragmentationEnd(
        class VmaBlockVectorDefragmentationContext* pCtx,
        VmaDefragmentationStats* pStats);

    // To be used only while the m_Mutex is locked. Used during defragmentation.

    size_t GetBlockCount() const { return m_Blocks.size(); }
    VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
    size_t CalcAllocationCount() const;
    bool IsBufferImageGranularityConflictPossible() const;

private:
    friend class VmaDefragmentationAlgorithm_Generic;

    const VmaAllocator m_hAllocator;
    const VmaPool m_hParentPool;
    const uint32_t m_MemoryTypeIndex;
    const VkDeviceSize m_PreferredBlockSize;
    const size_t m_MinBlockCount;
    const size_t m_MaxBlockCount;
    const VkDeviceSize m_BufferImageGranularity;
    const uint32_t m_FrameInUseCount;
    const bool m_IsCustomPool;
    const bool m_ExplicitBlockSize;
    const uint32_t m_Algorithm;
    /* There can be at most one allocation that is completely empty - a
    hysteresis to avoid pessimistic case of alternating creation and destruction
    of a VkDeviceMemory. */
    bool m_HasEmptyBlock;
    VMA_RW_MUTEX m_Mutex;
    // Incrementally sorted by sumFreeSize, ascending.
    VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
    uint32_t m_NextBlockId;

    VkDeviceSize CalcMaxBlockSize() const;

    // Finds and removes given block from vector.
    void Remove(VmaDeviceMemoryBlock* pBlock);

    // Performs single step in sorting m_Blocks. They may not be fully sorted
    // after this call.
    void IncrementallySortBlocks();

    // Allocates a single allocation (one element of an Allocate() request).
    VkResult AllocatePage(
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // To be used only without CAN_MAKE_OTHER_LOST flag.
    VkResult AllocateFromBlock(
        VmaDeviceMemoryBlock* pBlock,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        VmaAllocationCreateFlags allocFlags,
        void* pUserData,
        VmaSuballocationType suballocType,
        uint32_t strategy,
        VmaAllocation* pAllocation);

    VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);

    // Saves result to pCtx->res.
    void ApplyDefragmentationMovesCpu(
        class VmaBlockVectorDefragmentationContext* pDefragCtx,
        const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves);
    // Saves result to pCtx->res.
    void ApplyDefragmentationMovesGpu(
        class VmaBlockVectorDefragmentationContext* pDefragCtx,
        const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
        VkCommandBuffer commandBuffer);

    /*
    Used during defragmentation. pDefragmentationStats is optional. It's in/out
    - updated with new data.
    */
    void FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats);
};
    6062 
// Implementation behind the VmaPool handle: a custom memory pool owning a
// single VmaBlockVector that holds the pool's memory blocks.
struct VmaPool_T
{
    VMA_CLASS_NO_COPY(VmaPool_T)
public:
    VmaBlockVector m_BlockVector;

    VmaPool_T(
        VmaAllocator hAllocator,
        const VmaPoolCreateInfo& createInfo,
        VkDeviceSize preferredBlockSize);
    ~VmaPool_T();

    uint32_t GetId() const { return m_Id; }
    // Id may be assigned only once - asserts that it is still the initial 0.
    void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }

#if VMA_STATS_STRING_ENABLED
    //void PrintDetailedMap(class VmaStringBuilder& sb);
#endif

private:
    uint32_t m_Id;
};
    6085 
    6086 /*
    6087 Performs defragmentation:
    6088 
    6089 - Updates `pBlockVector->m_pMetadata`.
    6090 - Updates allocations by calling ChangeBlockAllocation() or ChangeOffset().
    6091 - Does not move actual data, only returns requested moves as `moves`.
    6092 */
/*
Abstract base for defragmentation algorithms. Performs defragmentation:

- Updates `pBlockVector->m_pMetadata`.
- Updates allocations by calling ChangeBlockAllocation() or ChangeOffset().
- Does not move actual data, only returns requested moves as `moves`.
*/
class VmaDefragmentationAlgorithm
{
    VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm)
public:
    VmaDefragmentationAlgorithm(
        VmaAllocator hAllocator,
        VmaBlockVector* pBlockVector,
        uint32_t currentFrameIndex) :
        m_hAllocator(hAllocator),
        m_pBlockVector(pBlockVector),
        m_CurrentFrameIndex(currentFrameIndex)
    {
    }
    virtual ~VmaDefragmentationAlgorithm()
    {
    }

    // Registers one allocation as a candidate for defragmentation.
    // pChanged is an optional per-allocation output flag - presumably set to
    // VK_TRUE if the allocation ends up moved (matches the semantics of
    // VmaDefragmentationInfo2::pAllocationsChanged).
    virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) = 0;
    // Marks all allocations of the block vector as candidates.
    virtual void AddAll() = 0;

    // Computes moves, appending them to `moves`, limited by maxBytesToMove
    // and maxAllocationsToMove.
    virtual VkResult Defragment(
        VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove) = 0;

    virtual VkDeviceSize GetBytesMoved() const = 0;
    virtual uint32_t GetAllocationsMoved() const = 0;

protected:
    VmaAllocator const m_hAllocator;
    VmaBlockVector* const m_pBlockVector;
    const uint32_t m_CurrentFrameIndex;

    // Pairs an allocation with the caller's optional "changed" output flag.
    struct AllocationInfo
    {
        VmaAllocation m_hAllocation;
        VkBool32* m_pChanged;

        AllocationInfo() :
            m_hAllocation(VK_NULL_HANDLE),
            m_pChanged(VMA_NULL)
        {
        }
        AllocationInfo(VmaAllocation hAlloc, VkBool32* pChanged) :
            m_hAllocation(hAlloc),
            m_pChanged(pChanged)
        {
        }
    };
};
    6143 
/*
Defragmentation algorithm that operates on a single VmaBlockVector.
Tracks registered allocations per block and produces moves in rounds via
Defragment(); the actual move/copy is performed by the caller.
*/
class VmaDefragmentationAlgorithm_Generic : public VmaDefragmentationAlgorithm
{
    VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Generic)
public:
    VmaDefragmentationAlgorithm_Generic(
        VmaAllocator hAllocator,
        VmaBlockVector* pBlockVector,
        uint32_t currentFrameIndex,
        bool overlappingMoveSupported);
    virtual ~VmaDefragmentationAlgorithm_Generic();

    virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
    virtual void AddAll() { m_AllAllocations = true; }

    // Appends planned moves to `moves`, limited by maxBytesToMove and
    // maxAllocationsToMove. Updates m_BytesMoved / m_AllocationsMoved.
    virtual VkResult Defragment(
        VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);

    virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }

private:
    uint32_t m_AllocationCount;
    bool m_AllAllocations;

    VkDeviceSize m_BytesMoved;
    uint32_t m_AllocationsMoved;

    // Comparator: sorts AllocationInfo descending by allocation size.
    struct AllocationInfoSizeGreater
    {
        bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
        {
            return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
        }
    };

    // Comparator: sorts AllocationInfo descending by offset within its block.
    struct AllocationInfoOffsetGreater
    {
        bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
        {
            return lhs.m_hAllocation->GetOffset() > rhs.m_hAllocation->GetOffset();
        }
    };

    // Per-memory-block bookkeeping used while planning moves.
    struct BlockInfo
    {
        size_t m_OriginalBlockIndex;
        VmaDeviceMemoryBlock* m_pBlock;
        bool m_HasNonMovableAllocations;
        VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

        BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
            m_OriginalBlockIndex(SIZE_MAX),
            m_pBlock(VMA_NULL),
            m_HasNonMovableAllocations(true),
            m_Allocations(pAllocationCallbacks)
        {
        }

        // A block has non-movable allocations when not every allocation in the
        // block's metadata was registered for defragmentation.
        void CalcHasNonMovableAllocations()
        {
            const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
            const size_t defragmentAllocCount = m_Allocations.size();
            m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
        }

        void SortAllocationsBySizeDescending()
        {
            VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
        }

        void SortAllocationsByOffsetDescending()
        {
            VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoOffsetGreater());
        }
    };

    // Orders BlockInfo entries by the address of the underlying memory block,
    // enabling binary search against a raw VmaDeviceMemoryBlock pointer.
    struct BlockPointerLess
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlock;
        }
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
        }
    };

    // 1. Blocks with some non-movable allocations go first.
    // 2. Blocks with smaller sumFreeSize go first.
    struct BlockInfoCompareMoveDestination
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return true;
            }
            if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return false;
            }
            if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
            {
                return true;
            }
            return false;
        }
    };

    typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
    BlockInfoVector m_Blocks;

    // One pass of planning; called until limits are reached or nothing moves.
    VkResult DefragmentRound(
        VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);

    size_t CalcBlocksWithNonMovableCount() const;

    // Decides whether a proposed src->dst relocation is worthwhile
    // (definition not in this section of the file).
    static bool MoveMakesSense(
        size_t dstBlockIndex, VkDeviceSize dstOffset,
        size_t srcBlockIndex, VkDeviceSize srcOffset);
};
    6270 
/*
Alternative defragmentation algorithm. Unlike the generic variant it does not
track individual allocation handles (AddAllocation only counts them) and works
through block metadata directly (Preprocess/PostprocessMetadata).
*/
class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm
{
    VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Fast)
public:
    VmaDefragmentationAlgorithm_Fast(
        VmaAllocator hAllocator,
        VmaBlockVector* pBlockVector,
        uint32_t currentFrameIndex,
        bool overlappingMoveSupported);
    virtual ~VmaDefragmentationAlgorithm_Fast();

    // Only counts the allocation; per-allocation handles/pChanged are not stored.
    virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; }
    virtual void AddAll() { m_AllAllocations = true; }

    virtual VkResult Defragment(
        VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);

    virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }

private:
    struct BlockInfo
    {
        size_t origBlockIndex;
    };

    // Small fixed-capacity (MAX_COUNT) cache of known free regions.
    // Register() remembers a region; Fetch() finds and consumes one that fits.
    class FreeSpaceDatabase
    {
    public:
        FreeSpaceDatabase()
        {
            // Mark every slot invalid (blockInfoIndex == SIZE_MAX).
            FreeSpace s = {};
            s.blockInfoIndex = SIZE_MAX;
            for(size_t i = 0; i < MAX_COUNT; ++i)
            {
                m_FreeSpaces[i] = s;
            }
        }

        // Remembers a free region. Regions below
        // VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER are ignored. When all
        // slots are occupied, replaces the smallest remembered region that is
        // smaller than the new one.
        void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size)
        {
            if(size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
            {
                return;
            }

            // Find first invalid or the smallest structure.
            size_t bestIndex = SIZE_MAX;
            for(size_t i = 0; i < MAX_COUNT; ++i)
            {
                // Empty structure.
                if(m_FreeSpaces[i].blockInfoIndex == SIZE_MAX)
                {
                    bestIndex = i;
                    break;
                }
                if(m_FreeSpaces[i].size < size &&
                    (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size))
                {
                    bestIndex = i;
                }
            }

            if(bestIndex != SIZE_MAX)
            {
                m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex;
                m_FreeSpaces[bestIndex].offset = offset;
                m_FreeSpaces[bestIndex].size = size;
            }
        }

        // Finds a remembered region that can hold `size` bytes at `alignment`,
        // preferring the candidate that leaves the most space after the
        // allocation. On success the slot is shrunk (if the remainder is still
        // worth registering) or invalidated, and true is returned.
        bool Fetch(VkDeviceSize alignment, VkDeviceSize size,
            size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset)
        {
            size_t bestIndex = SIZE_MAX;
            VkDeviceSize bestFreeSpaceAfter = 0;
            for(size_t i = 0; i < MAX_COUNT; ++i)
            {
                // Structure is valid.
                if(m_FreeSpaces[i].blockInfoIndex != SIZE_MAX)
                {
                    const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment);
                    // Allocation fits into this structure.
                    if(dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size)
                    {
                        const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) -
                            (dstOffset + size);
                        if(bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter)
                        {
                            bestIndex = i;
                            bestFreeSpaceAfter = freeSpaceAfter;
                        }
                    }
                }
            }

            if(bestIndex != SIZE_MAX)
            {
                outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex;
                outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment);

                if(bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
                {
                    // Leave this structure for remaining empty space.
                    const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size;
                    m_FreeSpaces[bestIndex].offset += alignmentPlusSize;
                    m_FreeSpaces[bestIndex].size -= alignmentPlusSize;
                }
                else
                {
                    // This structure becomes invalid.
                    m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX;
                }

                return true;
            }

            return false;
        }

    private:
        static const size_t MAX_COUNT = 4;

        struct FreeSpace
        {
            size_t blockInfoIndex; // SIZE_MAX means this structure is invalid.
            VkDeviceSize offset;
            VkDeviceSize size;
        } m_FreeSpaces[MAX_COUNT];
    };

    const bool m_OverlappingMoveSupported;

    uint32_t m_AllocationCount;
    bool m_AllAllocations;

    VkDeviceSize m_BytesMoved;
    uint32_t m_AllocationsMoved;

    VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> > m_BlockInfos;

    void PreprocessMetadata();
    void PostprocessMetadata();
    void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc);
};
    6418 
// Per-memory-block state carried through a defragmentation operation.
struct VmaBlockDefragmentationContext
{
    enum BLOCK_FLAG
    {
        BLOCK_FLAG_USED = 0x00000001,
    };
    uint32_t flags;
    // NOTE(review): presumably a temporary buffer created over this block for
    // GPU-side copies — confirm at the usage site.
    VkBuffer hBuffer;
};
    6428 
/*
Per-block-vector part of an ongoing defragmentation operation.
Collects allocations between construction and Begin() (see m_Allocations) and
owns the defragmentation algorithm object (m_pAlgorithm).
*/
class VmaBlockVectorDefragmentationContext
{
    VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext)
public:
    VkResult res;
    bool mutexLocked;
    VmaVector< VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext> > blockContexts;

    VmaBlockVectorDefragmentationContext(
        VmaAllocator hAllocator,
        VmaPool hCustomPool, // Optional.
        VmaBlockVector* pBlockVector,
        uint32_t currFrameIndex);
    ~VmaBlockVectorDefragmentationContext();

    VmaPool GetCustomPool() const { return m_hCustomPool; }
    VmaBlockVector* GetBlockVector() const { return m_pBlockVector; }
    VmaDefragmentationAlgorithm* GetAlgorithm() const { return m_pAlgorithm; }

    void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
    void AddAll() { m_AllAllocations = true; }

    void Begin(bool overlappingMoveSupported);

private:
    const VmaAllocator m_hAllocator;
    // Null if not from custom pool.
    const VmaPool m_hCustomPool;
    // Redundant, for convenience not to fetch from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors.
    VmaBlockVector* const m_pBlockVector;
    const uint32_t m_CurrFrameIndex;
    // Owner of this object.
    VmaDefragmentationAlgorithm* m_pAlgorithm;

    struct AllocInfo
    {
        VmaAllocation hAlloc;
        VkBool32* pChanged;
    };
    // Used between constructor and Begin.
    VmaVector< AllocInfo, VmaStlAllocator<AllocInfo> > m_Allocations;
    bool m_AllAllocations;
};
    6472 
// Implementation behind the VmaDefragmentationContext handle: top-level state
// of one defragmentation operation, spanning both default pools (per memory
// type) and custom pools.
struct VmaDefragmentationContext_T
{
private:
    VMA_CLASS_NO_COPY(VmaDefragmentationContext_T)
public:
    VmaDefragmentationContext_T(
        VmaAllocator hAllocator,
        uint32_t currFrameIndex,
        uint32_t flags,
        VmaDefragmentationStats* pStats);
    ~VmaDefragmentationContext_T();

    void AddPools(uint32_t poolCount, VmaPool* pPools);
    void AddAllocations(
        uint32_t allocationCount,
        VmaAllocation* pAllocations,
        VkBool32* pAllocationsChanged);

    /*
    Returns:
    - `VK_SUCCESS` if succeeded and object can be destroyed immediately.
    - `VK_NOT_READY` if succeeded but the object must remain alive until vmaDefragmentationEnd().
    - Negative value if error occurred and object can be destroyed immediately.
    */
    VkResult Defragment(
        VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
        VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
        VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats);

private:
    const VmaAllocator m_hAllocator;
    const uint32_t m_CurrFrameIndex;
    const uint32_t m_Flags;
    VmaDefragmentationStats* const m_pStats;
    // Owner of these objects.
    VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES];
    // Owner of these objects.
    VmaVector< VmaBlockVectorDefragmentationContext*, VmaStlAllocator<VmaBlockVectorDefragmentationContext*> > m_CustomPoolContexts;
};
    6512 
    6513 #if VMA_RECORDING_ENABLED
    6514 
    6515 class VmaRecorder
    6516 {
    6517 public:
    6518  VmaRecorder();
    6519  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
    6520  void WriteConfiguration(
    6521  const VkPhysicalDeviceProperties& devProps,
    6522  const VkPhysicalDeviceMemoryProperties& memProps,
    6523  bool dedicatedAllocationExtensionEnabled);
    6524  ~VmaRecorder();
    6525 
    6526  void RecordCreateAllocator(uint32_t frameIndex);
    6527  void RecordDestroyAllocator(uint32_t frameIndex);
    6528  void RecordCreatePool(uint32_t frameIndex,
    6529  const VmaPoolCreateInfo& createInfo,
    6530  VmaPool pool);
    6531  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
    6532  void RecordAllocateMemory(uint32_t frameIndex,
    6533  const VkMemoryRequirements& vkMemReq,
    6534  const VmaAllocationCreateInfo& createInfo,
    6535  VmaAllocation allocation);
    6536  void RecordAllocateMemoryPages(uint32_t frameIndex,
    6537  const VkMemoryRequirements& vkMemReq,
    6538  const VmaAllocationCreateInfo& createInfo,
    6539  uint64_t allocationCount,
    6540  const VmaAllocation* pAllocations);
    6541  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
    6542  const VkMemoryRequirements& vkMemReq,
    6543  bool requiresDedicatedAllocation,
    6544  bool prefersDedicatedAllocation,
    6545  const VmaAllocationCreateInfo& createInfo,
    6546  VmaAllocation allocation);
    6547  void RecordAllocateMemoryForImage(uint32_t frameIndex,
    6548  const VkMemoryRequirements& vkMemReq,
    6549  bool requiresDedicatedAllocation,
    6550  bool prefersDedicatedAllocation,
    6551  const VmaAllocationCreateInfo& createInfo,
    6552  VmaAllocation allocation);
    6553  void RecordFreeMemory(uint32_t frameIndex,
    6554  VmaAllocation allocation);
    6555  void RecordFreeMemoryPages(uint32_t frameIndex,
    6556  uint64_t allocationCount,
    6557  const VmaAllocation* pAllocations);
    6558  void RecordResizeAllocation(
    6559  uint32_t frameIndex,
    6560  VmaAllocation allocation,
    6561  VkDeviceSize newSize);
    6562  void RecordSetAllocationUserData(uint32_t frameIndex,
    6563  VmaAllocation allocation,
    6564  const void* pUserData);
    6565  void RecordCreateLostAllocation(uint32_t frameIndex,
    6566  VmaAllocation allocation);
    6567  void RecordMapMemory(uint32_t frameIndex,
    6568  VmaAllocation allocation);
    6569  void RecordUnmapMemory(uint32_t frameIndex,
    6570  VmaAllocation allocation);
    6571  void RecordFlushAllocation(uint32_t frameIndex,
    6572  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    6573  void RecordInvalidateAllocation(uint32_t frameIndex,
    6574  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    6575  void RecordCreateBuffer(uint32_t frameIndex,
    6576  const VkBufferCreateInfo& bufCreateInfo,
    6577  const VmaAllocationCreateInfo& allocCreateInfo,
    6578  VmaAllocation allocation);
    6579  void RecordCreateImage(uint32_t frameIndex,
    6580  const VkImageCreateInfo& imageCreateInfo,
    6581  const VmaAllocationCreateInfo& allocCreateInfo,
    6582  VmaAllocation allocation);
    6583  void RecordDestroyBuffer(uint32_t frameIndex,
    6584  VmaAllocation allocation);
    6585  void RecordDestroyImage(uint32_t frameIndex,
    6586  VmaAllocation allocation);
    6587  void RecordTouchAllocation(uint32_t frameIndex,
    6588  VmaAllocation allocation);
    6589  void RecordGetAllocationInfo(uint32_t frameIndex,
    6590  VmaAllocation allocation);
    6591  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
    6592  VmaPool pool);
    6593  void RecordDefragmentationBegin(uint32_t frameIndex,
    6594  const VmaDefragmentationInfo2& info,
    6596  void RecordDefragmentationEnd(uint32_t frameIndex,
    6598 
    6599 private:
    6600  struct CallParams
    6601  {
    6602  uint32_t threadId;
    6603  double time;
    6604  };
    6605 
    6606  class UserDataString
    6607  {
    6608  public:
    6609  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
    6610  const char* GetString() const { return m_Str; }
    6611 
    6612  private:
    6613  char m_PtrStr[17];
    6614  const char* m_Str;
    6615  };
    6616 
    6617  bool m_UseMutex;
    6618  VmaRecordFlags m_Flags;
    6619  FILE* m_File;
    6620  VMA_MUTEX m_FileMutex;
    6621  int64_t m_Freq;
    6622  int64_t m_StartCounter;
    6623 
    6624  void GetBasicParams(CallParams& outParams);
    6625 
    6626  // T must be a pointer type, e.g. VmaAllocation, VmaPool.
    6627  template<typename T>
    6628  void PrintPointerList(uint64_t count, const T* pItems)
    6629  {
    6630  if(count)
    6631  {
    6632  fprintf(m_File, "%p", pItems[0]);
    6633  for(uint64_t i = 1; i < count; ++i)
    6634  {
    6635  fprintf(m_File, " %p", pItems[i]);
    6636  }
    6637  }
    6638  }
    6639 
    6640  void PrintPointerList(uint64_t count, const VmaAllocation* pItems);
    6641  void Flush();
    6642 };
    6643 
    6644 #endif // #if VMA_RECORDING_ENABLED
    6645 
    6646 /*
    6647 Thread-safe wrapper over VmaPoolAllocator free list, for allocation of VmaAllocation_T objects.
    6648 */
class VmaAllocationObjectAllocator
{
    VMA_CLASS_NO_COPY(VmaAllocationObjectAllocator)
public:
    VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks);

    // Per the wrapper description above, access to the underlying pool
    // allocator is expected to be serialized via m_Mutex.
    VmaAllocation Allocate();
    void Free(VmaAllocation hAlloc);

private:
    VMA_MUTEX m_Mutex;
    VmaPoolAllocator<VmaAllocation_T> m_Allocator;
};
    6662 
// Main allocator object. Implementation behind the public VmaAllocator handle.
struct VmaAllocator_T
{
    VMA_CLASS_NO_COPY(VmaAllocator_T)
public:
    bool m_UseMutex;
    bool m_UseKhrDedicatedAllocation;
    VkDevice m_hDevice;
    bool m_AllocationCallbacksSpecified;
    VkAllocationCallbacks m_AllocationCallbacks;
    VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
    // Pooled allocator for VmaAllocation_T objects (see class above).
    VmaAllocationObjectAllocator m_AllocationObjectAllocator;

    // Number of bytes free out of limit, or VK_WHOLE_SIZE if no limit for that heap.
    VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
    VMA_MUTEX m_HeapSizeLimitMutex;

    VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
    VkPhysicalDeviceMemoryProperties m_MemProps;

    // Default pools.
    VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];

    // Each vector is sorted by memory (handle value).
    typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
    AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
    VMA_RW_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];

    VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
    VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
    ~VmaAllocator_T();

    // Returns the user-provided CPU allocation callbacks, or null when none
    // were specified.
    const VkAllocationCallbacks* GetAllocationCallbacks() const
    {
        return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
    }
    const VmaVulkanFunctions& GetVulkanFunctions() const
    {
        return m_VulkanFunctions;
    }

    // Device bufferImageGranularity, clamped up by the debug minimum.
    VkDeviceSize GetBufferImageGranularity() const
    {
        return VMA_MAX(
            static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
            m_PhysicalDeviceProperties.limits.bufferImageGranularity);
    }

    uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
    uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }

    uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
    {
        VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
        return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
    }
    // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
    bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
    {
        return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
            VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    }
    // Minimum alignment for all allocations in specific memory type.
    // Non-coherent memory is additionally aligned to nonCoherentAtomSize.
    VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
    {
        return IsMemoryTypeNonCoherent(memTypeIndex) ?
            VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
            (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
    }

    bool IsIntegratedGpu() const
    {
        return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
    }

#if VMA_RECORDING_ENABLED
    VmaRecorder* GetRecorder() const { return m_pRecorder; }
#endif

    void GetBufferMemoryRequirements(
        VkBuffer hBuffer,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;
    void GetImageMemoryRequirements(
        VkImage hImage,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;

    // Main allocation function.
    VkResult AllocateMemory(
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        size_t allocationCount,
        VmaAllocation* pAllocations);

    // Main deallocation function.
    void FreeMemory(
        size_t allocationCount,
        const VmaAllocation* pAllocations);

    VkResult ResizeAllocation(
        const VmaAllocation alloc,
        VkDeviceSize newSize);

    void CalculateStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    VkResult DefragmentationBegin(
        const VmaDefragmentationInfo2& info,
        VmaDefragmentationStats* pStats,
        VmaDefragmentationContext* pContext);
    VkResult DefragmentationEnd(
        VmaDefragmentationContext context);

    void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
    bool TouchAllocation(VmaAllocation hAllocation);

    VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
    void DestroyPool(VmaPool pool);
    void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);

    void SetCurrentFrameIndex(uint32_t frameIndex);
    uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }

    void MakePoolAllocationsLost(
        VmaPool hPool,
        size_t* pLostAllocationCount);
    VkResult CheckPoolCorruption(VmaPool hPool);
    VkResult CheckCorruption(uint32_t memoryTypeBits);

    void CreateLostAllocation(VmaAllocation* pAllocation);

    // Wrappers over vkAllocateMemory / vkFreeMemory (defined elsewhere in the file).
    VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
    void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);

    VkResult Map(VmaAllocation hAllocation, void** ppData);
    void Unmap(VmaAllocation hAllocation);

    VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
    VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);

    void FlushOrInvalidateAllocation(
        VmaAllocation hAllocation,
        VkDeviceSize offset, VkDeviceSize size,
        VMA_CACHE_OPERATION op);

    void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);

    /*
    Returns bit mask of memory types that can support defragmentation on GPU as
    they support creation of required buffer for copy operations.
    */
    uint32_t GetGpuDefragmentationMemoryTypeBits();

private:
    VkDeviceSize m_PreferredLargeHeapBlockSize;

    VkPhysicalDevice m_PhysicalDevice;
    VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
    VMA_ATOMIC_UINT32 m_GpuDefragmentationMemoryTypeBits; // UINT32_MAX means uninitialized.

    VMA_RW_MUTEX m_PoolsMutex;
    // Protected by m_PoolsMutex. Sorted by pointer value.
    VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
    uint32_t m_NextPoolId;

    VmaVulkanFunctions m_VulkanFunctions;

#if VMA_RECORDING_ENABLED
    VmaRecorder* m_pRecorder;
#endif

    void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);

    VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);

    VkResult AllocateMemoryOfType(
        VkDeviceSize size,
        VkDeviceSize alignment,
        bool dedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        uint32_t memTypeIndex,
        VmaSuballocationType suballocType,
        size_t allocationCount,
        VmaAllocation* pAllocations);

    // Helper function only to be used inside AllocateDedicatedMemory.
    VkResult AllocateDedicatedMemoryPage(
        VkDeviceSize size,
        VmaSuballocationType suballocType,
        uint32_t memTypeIndex,
        const VkMemoryAllocateInfo& allocInfo,
        bool map,
        bool isUserDataString,
        void* pUserData,
        VmaAllocation* pAllocation);

    // Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
    VkResult AllocateDedicatedMemory(
        VkDeviceSize size,
        VmaSuballocationType suballocType,
        uint32_t memTypeIndex,
        bool map,
        bool isUserDataString,
        void* pUserData,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        size_t allocationCount,
        VmaAllocation* pAllocations);

    void FreeDedicatedMemory(VmaAllocation allocation);

    /*
    Calculates and returns bit mask of memory types that can support defragmentation
    on GPU as they support creation of required buffer for copy operations.
    */
    uint32_t CalculateGpuDefragmentationMemoryTypeBits() const;
};
    6893 
////////////////////////////////////////////////////////////////////////////////
// Memory allocation #2 after VmaAllocator_T definition
    6896 
    6897 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
    6898 {
    6899  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
    6900 }
    6901 
    6902 static void VmaFree(VmaAllocator hAllocator, void* ptr)
    6903 {
    6904  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
    6905 }
    6906 
    6907 template<typename T>
    6908 static T* VmaAllocate(VmaAllocator hAllocator)
    6909 {
    6910  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
    6911 }
    6912 
    6913 template<typename T>
    6914 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
    6915 {
    6916  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
    6917 }
    6918 
    6919 template<typename T>
    6920 static void vma_delete(VmaAllocator hAllocator, T* ptr)
    6921 {
    6922  if(ptr != VMA_NULL)
    6923  {
    6924  ptr->~T();
    6925  VmaFree(hAllocator, ptr);
    6926  }
    6927 }
    6928 
    6929 template<typename T>
    6930 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
    6931 {
    6932  if(ptr != VMA_NULL)
    6933  {
    6934  for(size_t i = count; i--; )
    6935  ptr[i].~T();
    6936  VmaFree(hAllocator, ptr);
    6937  }
    6938 }
    6939 
////////////////////////////////////////////////////////////////////////////////
// VmaStringBuilder
    6942 
    6943 #if VMA_STATS_STRING_ENABLED
    6944 
// Helper for building text output incrementally; used by the statistics-string
// code (compiled under VMA_STATS_STRING_ENABLED). The internal buffer is not
// null-terminated by Add(): consumers pair GetData() with GetLength().
class VmaStringBuilder
{
public:
    VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    size_t GetLength() const { return m_Data.size(); }
    const char* GetData() const { return m_Data.data(); }

    void Add(char ch) { m_Data.push_back(ch); }
    void Add(const char* pStr);
    void AddNewLine() { Add('\n'); }
    void AddNumber(uint32_t num);
    void AddNumber(uint64_t num);
    void AddPointer(const void* ptr);

private:
    VmaVector< char, VmaStlAllocator<char> > m_Data;
};
    6962 
    6963 void VmaStringBuilder::Add(const char* pStr)
    6964 {
    6965  const size_t strLen = strlen(pStr);
    6966  if(strLen > 0)
    6967  {
    6968  const size_t oldCount = m_Data.size();
    6969  m_Data.resize(oldCount + strLen);
    6970  memcpy(m_Data.data() + oldCount, pStr, strLen);
    6971  }
    6972 }
    6973 
    6974 void VmaStringBuilder::AddNumber(uint32_t num)
    6975 {
    6976  char buf[11];
    6977  VmaUint32ToStr(buf, sizeof(buf), num);
    6978  Add(buf);
    6979 }
    6980 
    6981 void VmaStringBuilder::AddNumber(uint64_t num)
    6982 {
    6983  char buf[21];
    6984  VmaUint64ToStr(buf, sizeof(buf), num);
    6985  Add(buf);
    6986 }
    6987 
    6988 void VmaStringBuilder::AddPointer(const void* ptr)
    6989 {
    6990  char buf[21];
    6991  VmaPtrToStr(buf, sizeof(buf), ptr);
    6992  Add(buf);
    6993 }
    6994 
    6995 #endif // #if VMA_STATS_STRING_ENABLED
    6996 
////////////////////////////////////////////////////////////////////////////////
// VmaJsonWriter
    6999 
    7000 #if VMA_STATS_STRING_ENABLED
    7001 
/*
Emits JSON text into a VmaStringBuilder. Collections are opened and closed
with Begin/End pairs; the internal stack records every open object/array and
the destructor asserts that all of them (and any open string) were closed.
*/
class VmaJsonWriter
{
    VMA_CLASS_NO_COPY(VmaJsonWriter)
public:
    VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    ~VmaJsonWriter();

    // Opens a JSON object '{'; must be balanced by EndObject().
    void BeginObject(bool singleLine = false);
    void EndObject();

    // Opens a JSON array '['; must be balanced by EndArray().
    void BeginArray(bool singleLine = false);
    void EndArray();

    // Writes a complete quoted string value in one call.
    void WriteString(const char* pStr);
    // Alternative to WriteString: compose a string value from multiple parts,
    // ending with EndString().
    void BeginString(const char* pStr = VMA_NULL);
    void ContinueString(const char* pStr);
    void ContinueString(uint32_t n);
    void ContinueString(uint64_t n);
    void ContinueString_Pointer(const void* ptr);
    void EndString(const char* pStr = VMA_NULL);

    void WriteNumber(uint32_t n);
    void WriteNumber(uint64_t n);
    void WriteBool(bool b);
    void WriteNull();

private:
    static const char* const INDENT;

    enum COLLECTION_TYPE
    {
        COLLECTION_TYPE_OBJECT,
        COLLECTION_TYPE_ARRAY,
    };
    // One entry per currently open object/array.
    struct StackItem
    {
        COLLECTION_TYPE type;
        uint32_t valueCount;
        bool singleLineMode;
    };

    VmaStringBuilder& m_SB;
    VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    // True between BeginString() and EndString().
    bool m_InsideString;

    void BeginValue(bool isString);
    void WriteIndent(bool oneLess = false);
};
    7050 
// One unit of indentation in the generated JSON text.
const char* const VmaJsonWriter::INDENT = " ";
    7052 
// Output is appended to `sb`; the allocation callbacks are used only for the
// internal collection stack.
VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
    m_SB(sb),
    m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    m_InsideString(false)
{
}
    7059 
VmaJsonWriter::~VmaJsonWriter()
{
    // The produced JSON must be complete: no open string, no open object/array.
    VMA_ASSERT(!m_InsideString);
    VMA_ASSERT(m_Stack.empty());
}
    7065 
    7066 void VmaJsonWriter::BeginObject(bool singleLine)
    7067 {
    7068  VMA_ASSERT(!m_InsideString);
    7069 
    7070  BeginValue(false);
    7071  m_SB.Add('{');
    7072 
    7073  StackItem item;
    7074  item.type = COLLECTION_TYPE_OBJECT;
    7075  item.valueCount = 0;
    7076  item.singleLineMode = singleLine;
    7077  m_Stack.push_back(item);
    7078 }
    7079 
    7080 void VmaJsonWriter::EndObject()
    7081 {
    7082  VMA_ASSERT(!m_InsideString);
    7083 
    7084  WriteIndent(true);
    7085  m_SB.Add('}');
    7086 
    7087  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
    7088  m_Stack.pop_back();
    7089 }
    7090 
    7091 void VmaJsonWriter::BeginArray(bool singleLine)
    7092 {
    7093  VMA_ASSERT(!m_InsideString);
    7094 
    7095  BeginValue(false);
    7096  m_SB.Add('[');
    7097 
    7098  StackItem item;
    7099  item.type = COLLECTION_TYPE_ARRAY;
    7100  item.valueCount = 0;
    7101  item.singleLineMode = singleLine;
    7102  m_Stack.push_back(item);
    7103 }
    7104 
    7105 void VmaJsonWriter::EndArray()
    7106 {
    7107  VMA_ASSERT(!m_InsideString);
    7108 
    7109  WriteIndent(true);
    7110  m_SB.Add(']');
    7111 
    7112  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
    7113  m_Stack.pop_back();
    7114 }
    7115 
    7116 void VmaJsonWriter::WriteString(const char* pStr)
    7117 {
    7118  BeginString(pStr);
    7119  EndString();
    7120 }
    7121 
// Opens a string value; optional pStr is written immediately. Further
// pieces may be appended via ContinueString* until EndString().
void VmaJsonWriter::BeginString(const char* pStr)
{
    VMA_ASSERT(!m_InsideString);

    BeginValue(true);
    m_SB.Add('"');
    m_InsideString = true;
    if(pStr != VMA_NULL && pStr[0] != '\0')
    {
        ContinueString(pStr);
    }
}
    7134 
    7135 void VmaJsonWriter::ContinueString(const char* pStr)
    7136 {
    7137  VMA_ASSERT(m_InsideString);
    7138 
    7139  const size_t strLen = strlen(pStr);
    7140  for(size_t i = 0; i < strLen; ++i)
    7141  {
    7142  char ch = pStr[i];
    7143  if(ch == '\\')
    7144  {
    7145  m_SB.Add("\\\\");
    7146  }
    7147  else if(ch == '"')
    7148  {
    7149  m_SB.Add("\\\"");
    7150  }
    7151  else if(ch >= 32)
    7152  {
    7153  m_SB.Add(ch);
    7154  }
    7155  else switch(ch)
    7156  {
    7157  case '\b':
    7158  m_SB.Add("\\b");
    7159  break;
    7160  case '\f':
    7161  m_SB.Add("\\f");
    7162  break;
    7163  case '\n':
    7164  m_SB.Add("\\n");
    7165  break;
    7166  case '\r':
    7167  m_SB.Add("\\r");
    7168  break;
    7169  case '\t':
    7170  m_SB.Add("\\t");
    7171  break;
    7172  default:
    7173  VMA_ASSERT(0 && "Character not currently supported.");
    7174  break;
    7175  }
    7176  }
    7177 }
    7178 
// Appends a decimal number to the currently open string value.
void VmaJsonWriter::ContinueString(uint32_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}
    7184 
// Appends a decimal number to the currently open string value.
void VmaJsonWriter::ContinueString(uint64_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}
    7190 
// Appends a formatted pointer value to the currently open string value.
void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddPointer(ptr);
}
    7196 
// Closes the currently open string value; optional pStr is appended first.
void VmaJsonWriter::EndString(const char* pStr)
{
    VMA_ASSERT(m_InsideString);
    if(pStr != VMA_NULL && pStr[0] != '\0')
    {
        ContinueString(pStr);
    }
    m_SB.Add('"');
    m_InsideString = false;
}
    7207 
// Emits a complete numeric value (with any needed separator/indent).
void VmaJsonWriter::WriteNumber(uint32_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}
    7214 
// Emits a complete numeric value (with any needed separator/indent).
void VmaJsonWriter::WriteNumber(uint64_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}
    7221 
    7222 void VmaJsonWriter::WriteBool(bool b)
    7223 {
    7224  VMA_ASSERT(!m_InsideString);
    7225  BeginValue(false);
    7226  m_SB.Add(b ? "true" : "false");
    7227 }
    7228 
// Emits the JSON literal null as a complete value.
void VmaJsonWriter::WriteNull()
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.Add("null");
}
    7235 
// Emits the punctuation that must precede a new value: ": " after an object
// key, ", " between sibling entries, plus indentation. At top level (stack
// empty) nothing is needed.
void VmaJsonWriter::BeginValue(bool isString)
{
    if(!m_Stack.empty())
    {
        StackItem& currItem = m_Stack.back();
        // Inside an object, even-numbered entries are keys and must be strings.
        if(currItem.type == COLLECTION_TYPE_OBJECT &&
            currItem.valueCount % 2 == 0)
        {
            VMA_ASSERT(isString);
        }

        if(currItem.type == COLLECTION_TYPE_OBJECT &&
            currItem.valueCount % 2 != 0)
        {
            // A key was just written - separate it from its value.
            m_SB.Add(": ");
        }
        else if(currItem.valueCount > 0)
        {
            // Not the first entry - separate it from the previous one.
            m_SB.Add(", ");
            WriteIndent();
        }
        else
        {
            // First entry of the collection.
            WriteIndent();
        }
        ++currItem.valueCount;
    }
}
    7264 
    7265 void VmaJsonWriter::WriteIndent(bool oneLess)
    7266 {
    7267  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
    7268  {
    7269  m_SB.AddNewLine();
    7270 
    7271  size_t count = m_Stack.size();
    7272  if(count > 0 && oneLess)
    7273  {
    7274  --count;
    7275  }
    7276  for(size_t i = 0; i < count; ++i)
    7277  {
    7278  m_SB.Add(INDENT);
    7279  }
    7280  }
    7281 }
    7282 
    7283 #endif // #if VMA_STATS_STRING_ENABLED
    7284 
    7286 
// Stores the user data pointer. In string mode (IsUserDataString()) the
// pointer is treated as a null-terminated string and a private copy is
// owned by this allocation; otherwise the raw pointer is stored as-is.
void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
{
    if(IsUserDataString())
    {
        // Re-setting the exact pointer we already own is not supported
        // (it would be freed below before being copied); null is allowed.
        VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);

        // Release the previously owned copy, if any.
        FreeUserDataString(hAllocator);

        if(pUserData != VMA_NULL)
        {
            // Duplicate the incoming string, including the terminating null.
            const char* const newStrSrc = (char*)pUserData;
            const size_t newStrLen = strlen(newStrSrc);
            char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
            memcpy(newStrDst, newStrSrc, newStrLen + 1);
            m_pUserData = newStrDst;
        }
    }
    else
    {
        // Plain mode: store the opaque pointer; no ownership is taken.
        m_pUserData = pUserData;
    }
}
    7309 
// Rebinds this block allocation to a (possibly different) block and offset.
// Used when the allocation is relocated, e.g. by defragmentation.
void VmaAllocation_T::ChangeBlockAllocation(
    VmaAllocator hAllocator,
    VmaDeviceMemoryBlock* block,
    VkDeviceSize offset)
{
    VMA_ASSERT(block != VMA_NULL);
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);

    // Move mapping reference counter from old block to new block.
    if(block != m_BlockAllocation.m_Block)
    {
        uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
        // A persistently mapped allocation holds one extra block mapping.
        if(IsPersistentMap())
            ++mapRefCount;
        m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
        block->Map(hAllocator, mapRefCount, VMA_NULL);
    }

    m_BlockAllocation.m_Block = block;
    m_BlockAllocation.m_Offset = offset;
}
    7331 
// Updates the recorded size of this allocation (must be non-zero).
void VmaAllocation_T::ChangeSize(VkDeviceSize newSize)
{
    VMA_ASSERT(newSize > 0);
    m_Size = newSize;
}
    7337 
// Updates the offset within the owning block; valid only for block allocations.
void VmaAllocation_T::ChangeOffset(VkDeviceSize newOffset)
{
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    m_BlockAllocation.m_Offset = newOffset;
}
    7343 
    7344 VkDeviceSize VmaAllocation_T::GetOffset() const
    7345 {
    7346  switch(m_Type)
    7347  {
    7348  case ALLOCATION_TYPE_BLOCK:
    7349  return m_BlockAllocation.m_Offset;
    7350  case ALLOCATION_TYPE_DEDICATED:
    7351  return 0;
    7352  default:
    7353  VMA_ASSERT(0);
    7354  return 0;
    7355  }
    7356 }
    7357 
    7358 VkDeviceMemory VmaAllocation_T::GetMemory() const
    7359 {
    7360  switch(m_Type)
    7361  {
    7362  case ALLOCATION_TYPE_BLOCK:
    7363  return m_BlockAllocation.m_Block->GetDeviceMemory();
    7364  case ALLOCATION_TYPE_DEDICATED:
    7365  return m_DedicatedAllocation.m_hMemory;
    7366  default:
    7367  VMA_ASSERT(0);
    7368  return VK_NULL_HANDLE;
    7369  }
    7370 }
    7371 
    7372 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
    7373 {
    7374  switch(m_Type)
    7375  {
    7376  case ALLOCATION_TYPE_BLOCK:
    7377  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
    7378  case ALLOCATION_TYPE_DEDICATED:
    7379  return m_DedicatedAllocation.m_MemoryTypeIndex;
    7380  default:
    7381  VMA_ASSERT(0);
    7382  return UINT32_MAX;
    7383  }
    7384 }
    7385 
// Returns the host-visible pointer for this allocation, or null if it is
// not currently mapped. For block allocations the block's base mapping is
// offset to this allocation's region.
void* VmaAllocation_T::GetMappedData() const
{
    switch(m_Type)
    {
    case ALLOCATION_TYPE_BLOCK:
        if(m_MapCount != 0)
        {
            // Block must be mapped if this allocation holds map references.
            void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
            VMA_ASSERT(pBlockData != VMA_NULL);
            return (char*)pBlockData + m_BlockAllocation.m_Offset;
        }
        else
        {
            return VMA_NULL;
        }
        break;
    case ALLOCATION_TYPE_DEDICATED:
        // Mapped pointer and map count must agree.
        VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
        return m_DedicatedAllocation.m_pMappedData;
    default:
        VMA_ASSERT(0);
        return VMA_NULL;
    }
}
    7410 
    7411 bool VmaAllocation_T::CanBecomeLost() const
    7412 {
    7413  switch(m_Type)
    7414  {
    7415  case ALLOCATION_TYPE_BLOCK:
    7416  return m_BlockAllocation.m_CanBecomeLost;
    7417  case ALLOCATION_TYPE_DEDICATED:
    7418  return false;
    7419  default:
    7420  VMA_ASSERT(0);
    7421  return false;
    7422  }
    7423 }
    7424 
// Tries to atomically mark this allocation as lost. Returns true on success;
// returns false if it is already lost (assert) or was used too recently
// (within frameInUseCount frames of currentFrameIndex).
bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    VMA_ASSERT(CanBecomeLost());

    /*
    Warning: This is a carefully designed algorithm.
    Do not modify unless you really know what you're doing :)
    */
    // Compare-and-swap loop on the last-use frame index.
    uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
    for(;;)
    {
        if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
        {
            // Already lost - caller should not ask again.
            VMA_ASSERT(0);
            return false;
        }
        else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
        {
            // Still potentially in use by the GPU - cannot be made lost.
            return false;
        }
        else // Last use time earlier than current time.
        {
            if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
            {
                // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
                // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
                return true;
            }
            // CAS failed: another thread updated the index; retry with the
            // refreshed value loaded by CompareExchangeLastUseFrameIndex.
        }
    }
}
    7456 
    7457 #if VMA_STATS_STRING_ENABLED
    7458 
// Correspond to values of enum VmaSuballocationType.
// Indexed directly by the enum value - keep order in sync with the enum.
static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    "FREE",
    "UNKNOWN",
    "BUFFER",
    "IMAGE_UNKNOWN",
    "IMAGE_LINEAR",
    "IMAGE_OPTIMAL",
};
    7468 
// Writes this allocation's key/value pairs into an already-open JSON object.
// Caller is responsible for BeginObject()/EndObject() around this call.
void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
{
    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);

    json.WriteString("Size");
    json.WriteNumber(m_Size);

    if(m_pUserData != VMA_NULL)
    {
        json.WriteString("UserData");
        if(IsUserDataString())
        {
            // Owned string copy - print its contents.
            json.WriteString((const char*)m_pUserData);
        }
        else
        {
            // Opaque pointer - print its address as a string.
            json.BeginString();
            json.ContinueString_Pointer(m_pUserData);
            json.EndString();
        }
    }

    json.WriteString("CreationFrameIndex");
    json.WriteNumber(m_CreationFrameIndex);

    json.WriteString("LastUseFrameIndex");
    json.WriteNumber(GetLastUseFrameIndex());

    if(m_BufferImageUsage != 0)
    {
        json.WriteString("Usage");
        json.WriteNumber(m_BufferImageUsage);
    }
}
    7504 
    7505 #endif
    7506 
    7507 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
    7508 {
    7509  VMA_ASSERT(IsUserDataString());
    7510  if(m_pUserData != VMA_NULL)
    7511  {
    7512  char* const oldStr = (char*)m_pUserData;
    7513  const size_t oldStrLen = strlen(oldStr);
    7514  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
    7515  m_pUserData = VMA_NULL;
    7516  }
    7517 }
    7518 
// Increments this block allocation's map reference count.
// The low 7 bits of m_MapCount hold the count; the top bit flags
// persistent mapping, so the count saturates at 0x7F.
void VmaAllocation_T::BlockAllocMap()
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);

    if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    {
        ++m_MapCount;
    }
    else
    {
        VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
    }
}
    7532 
// Decrements this block allocation's map reference count
// (low bits of m_MapCount, excluding the persistent-map flag bit).
void VmaAllocation_T::BlockAllocUnmap()
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);

    if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    {
        --m_MapCount;
    }
    else
    {
        VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
    }
}
    7546 
// Maps the dedicated allocation's memory and returns the pointer in *ppData.
// Reference-counted: the first call performs vkMapMemory over the whole
// allocation; later calls just bump the count and return the cached pointer.
VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);

    if(m_MapCount != 0)
    {
        // Already mapped - reuse the cached pointer unless the 7-bit
        // reference count would overflow.
        if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
        {
            VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
            *ppData = m_DedicatedAllocation.m_pMappedData;
            ++m_MapCount;
            return VK_SUCCESS;
        }
        else
        {
            VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
            return VK_ERROR_MEMORY_MAP_FAILED;
        }
    }
    else
    {
        // First mapping: map the entire memory object.
        VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
            hAllocator->m_hDevice,
            m_DedicatedAllocation.m_hMemory,
            0, // offset
            VK_WHOLE_SIZE,
            0, // flags
            ppData);
        if(result == VK_SUCCESS)
        {
            m_DedicatedAllocation.m_pMappedData = *ppData;
            m_MapCount = 1;
        }
        return result;
    }
}
    7583 
// Decrements the dedicated allocation's map reference count and calls
// vkUnmapMemory when it drops to zero.
void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);

    if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    {
        --m_MapCount;
        if(m_MapCount == 0)
        {
            // Last reference released - actually unmap the memory.
            m_DedicatedAllocation.m_pMappedData = VMA_NULL;
            (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
                hAllocator->m_hDevice,
                m_DedicatedAllocation.m_hMemory);
        }
    }
    else
    {
        VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
    }
}
    7604 
    7605 #if VMA_STATS_STRING_ENABLED
    7606 
// Serializes one VmaStatInfo as a JSON object. Min/Avg/Max sub-objects are
// emitted only when there is more than one allocation / unused range,
// since with 0 or 1 items they carry no extra information.
static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
{
    json.BeginObject();

    json.WriteString("Blocks");
    json.WriteNumber(stat.blockCount);

    json.WriteString("Allocations");
    json.WriteNumber(stat.allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber(stat.unusedRangeCount);

    json.WriteString("UsedBytes");
    json.WriteNumber(stat.usedBytes);

    json.WriteString("UnusedBytes");
    json.WriteNumber(stat.unusedBytes);

    if(stat.allocationCount > 1)
    {
        json.WriteString("AllocationSize");
        json.BeginObject(true);
        json.WriteString("Min");
        json.WriteNumber(stat.allocationSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.allocationSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.allocationSizeMax);
        json.EndObject();
    }

    if(stat.unusedRangeCount > 1)
    {
        json.WriteString("UnusedRangeSize");
        json.BeginObject(true);
        json.WriteString("Min");
        json.WriteNumber(stat.unusedRangeSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.unusedRangeSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.unusedRangeSizeMax);
        json.EndObject();
    }

    json.EndObject();
}
    7654 
    7655 #endif // #if VMA_STATS_STRING_ENABLED
    7656 
// Comparator ordering suballocation-list iterators by suballocation size.
// The heterogeneous overload (iterator vs raw size) enables binary search
// in m_FreeSuballocationsBySize with a plain VkDeviceSize key.
struct VmaSuballocationItemSizeLess
{
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        const VmaSuballocationList::iterator rhs) const
    {
        return lhs->size < rhs->size;
    }
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        VkDeviceSize rhsSize) const
    {
        return lhs->size < rhsSize;
    }
};
    7672 
    7673 
    7675 // class VmaBlockMetadata
    7676 
// Base metadata: size is set later via Init(); allocation callbacks are
// captured from the allocator for derived classes' containers.
VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
    m_Size(0),
    m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
{
}
    7682 
    7683 #if VMA_STATS_STRING_ENABLED
    7684 
// Opens the JSON object describing one block and its summary counters, then
// opens the "Suballocations" array. Must be paired with PrintDetailedMap_End.
void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
    VkDeviceSize unusedBytes,
    size_t allocationCount,
    size_t unusedRangeCount) const
{
    json.BeginObject();

    json.WriteString("TotalBytes");
    json.WriteNumber(GetSize());

    json.WriteString("UnusedBytes");
    json.WriteNumber(unusedBytes);

    json.WriteString("Allocations");
    json.WriteNumber((uint64_t)allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber((uint64_t)unusedRangeCount);

    json.WriteString("Suballocations");
    json.BeginArray();
}
    7707 
// Emits one used suballocation as a single-line JSON object:
// its offset plus the allocation's own parameters.
void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
    VkDeviceSize offset,
    VmaAllocation hAllocation) const
{
    json.BeginObject(true);

    json.WriteString("Offset");
    json.WriteNumber(offset);

    hAllocation->PrintParameters(json);

    json.EndObject();
}
    7721 
// Emits one free range as a single-line JSON object with type "FREE".
void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
    VkDeviceSize offset,
    VkDeviceSize size) const
{
    json.BeginObject(true);

    json.WriteString("Offset");
    json.WriteNumber(offset);

    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);

    json.WriteString("Size");
    json.WriteNumber(size);

    json.EndObject();
}
    7739 
// Closes the "Suballocations" array and the block object opened by
// PrintDetailedMap_Begin.
void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
{
    json.EndArray();
    json.EndObject();
}
    7745 
    7746 #endif // #if VMA_STATS_STRING_ENABLED
    7747 
    7749 // class VmaBlockMetadata_Generic
    7750 
// Generic metadata starts empty; real state is established by Init(size).
// Both containers use the allocator's CPU allocation callbacks.
VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_FreeCount(0),
    m_SumFreeSize(0),
    m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
{
}
    7759 
// Nothing to release explicitly - containers clean up via their allocators.
VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
{
}
    7763 
// Initializes metadata for a new block: one free suballocation covering
// the entire block, registered in the by-size lookup vector.
void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);

    m_FreeCount = 1;
    m_SumFreeSize = size;

    VmaSuballocation suballoc = {};
    suballoc.offset = 0;
    suballoc.size = size;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    suballoc.hAllocation = VK_NULL_HANDLE;

    // A whole block is always large enough to be registered by size.
    VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    m_Suballocations.push_back(suballoc);
    VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
    --suballocItem;
    m_FreeSuballocationsBySize.push_back(suballocItem);
}
    7783 
// Exhaustively checks internal consistency of the suballocation list and
// the by-size lookup vector. Returns true when valid; VMA_VALIDATE reports
// and returns false on the first violated invariant.
bool VmaBlockMetadata_Generic::Validate() const
{
    VMA_VALIDATE(!m_Suballocations.empty());

    // Expected offset of new suballocation as calculated from previous ones.
    VkDeviceSize calculatedOffset = 0;
    // Expected number of free suballocations as calculated from traversing their list.
    uint32_t calculatedFreeCount = 0;
    // Expected sum size of free suballocations as calculated from traversing their list.
    VkDeviceSize calculatedSumFreeSize = 0;
    // Expected number of free suballocations that should be registered in
    // m_FreeSuballocationsBySize calculated from traversing their list.
    size_t freeSuballocationsToRegister = 0;
    // True if previous visited suballocation was free.
    bool prevFree = false;

    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem)
    {
        const VmaSuballocation& subAlloc = *suballocItem;

        // Actual offset of this suballocation doesn't match expected one.
        VMA_VALIDATE(subAlloc.offset == calculatedOffset);

        const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
        // Two adjacent free suballocations are invalid. They should be merged.
        VMA_VALIDATE(!prevFree || !currFree);

        // Free ranges have no allocation handle; used ranges must have one.
        VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));

        if(currFree)
        {
            calculatedSumFreeSize += subAlloc.size;
            ++calculatedFreeCount;
            if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
            {
                ++freeSuballocationsToRegister;
            }

            // Margin required between allocations - every free space must be at least that large.
            VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
        }
        else
        {
            // The allocation object must agree with this list entry.
            VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
            VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);

            // Margin required between allocations - previous allocation must be free.
            VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
        }

        calculatedOffset += subAlloc.size;
        prevFree = currFree;
    }

    // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    // match expected one.
    VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);

    VkDeviceSize lastSize = 0;
    for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    {
        VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];

        // Only free suballocations can be registered in m_FreeSuballocationsBySize.
        VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
        // They must be sorted by size ascending.
        VMA_VALIDATE(suballocItem->size >= lastSize);

        lastSize = suballocItem->size;
    }

    // Check if totals match calculated values.
    VMA_VALIDATE(ValidateFreeSuballocationList());
    VMA_VALIDATE(calculatedOffset == GetSize());
    VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
    VMA_VALIDATE(calculatedFreeCount == m_FreeCount);

    return true;
}
    7865 
    7866 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
    7867 {
    7868  if(!m_FreeSuballocationsBySize.empty())
    7869  {
    7870  return m_FreeSuballocationsBySize.back()->size;
    7871  }
    7872  else
    7873  {
    7874  return 0;
    7875  }
    7876 }
    7877 
// Empty means exactly one suballocation and it is free (covers the whole block).
bool VmaBlockMetadata_Generic::IsEmpty() const
{
    return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
}
    7882 
// Fills outInfo with statistics for this single block: counts, byte totals,
// and min/max sizes of allocations and free ranges. Min fields stay at
// UINT64_MAX (and max at 0) when no item of that kind exists.
void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
{
    outInfo.blockCount = 1;

    const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    outInfo.allocationCount = rangeCount - m_FreeCount;
    outInfo.unusedRangeCount = m_FreeCount;

    outInfo.unusedBytes = m_SumFreeSize;
    outInfo.usedBytes = GetSize() - outInfo.unusedBytes;

    outInfo.allocationSizeMin = UINT64_MAX;
    outInfo.allocationSizeMax = 0;
    outInfo.unusedRangeSizeMin = UINT64_MAX;
    outInfo.unusedRangeSizeMax = 0;

    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem)
    {
        const VmaSuballocation& suballoc = *suballocItem;
        if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
            outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
        }
        else
        {
            outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
            outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
        }
    }
}
    7916 
// Accumulates this block's totals into pool-level statistics.
void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
{
    const uint32_t rangeCount = (uint32_t)m_Suballocations.size();

    inoutStats.size += GetSize();
    inoutStats.unusedSize += m_SumFreeSize;
    inoutStats.allocationCount += rangeCount - m_FreeCount;
    inoutStats.unusedRangeCount += m_FreeCount;
    // Pool-wide max is the max over the per-block maxima.
    inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
}
    7927 
    7928 #if VMA_STATS_STRING_ENABLED
    7929 
    7930 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
    7931 {
    7932  PrintDetailedMap_Begin(json,
    7933  m_SumFreeSize, // unusedBytes
    7934  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
    7935  m_FreeCount); // unusedRangeCount
    7936 
    7937  size_t i = 0;
    7938  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    7939  suballocItem != m_Suballocations.cend();
    7940  ++suballocItem, ++i)
    7941  {
    7942  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    7943  {
    7944  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
    7945  }
    7946  else
    7947  {
    7948  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
    7949  }
    7950  }
    7951 
    7952  PrintDetailedMap_End(json);
    7953 }
    7954 
    7955 #endif // #if VMA_STATS_STRING_ENABLED
    7956 
    7957 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
    7958  uint32_t currentFrameIndex,
    7959  uint32_t frameInUseCount,
    7960  VkDeviceSize bufferImageGranularity,
    7961  VkDeviceSize allocSize,
    7962  VkDeviceSize allocAlignment,
    7963  bool upperAddress,
    7964  VmaSuballocationType allocType,
    7965  bool canMakeOtherLost,
    7966  uint32_t strategy,
    7967  VmaAllocationRequest* pAllocationRequest)
    7968 {
    7969  VMA_ASSERT(allocSize > 0);
    7970  VMA_ASSERT(!upperAddress);
    7971  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    7972  VMA_ASSERT(pAllocationRequest != VMA_NULL);
    7973  VMA_HEAVY_ASSERT(Validate());
    7974 
    7975  pAllocationRequest->type = VmaAllocationRequestType::Normal;
    7976 
    7977  // There is not enough total free space in this block to fullfill the request: Early return.
    7978  if(canMakeOtherLost == false &&
    7979  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
    7980  {
    7981  return false;
    7982  }
    7983 
    7984  // New algorithm, efficiently searching freeSuballocationsBySize.
    7985  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
    7986  if(freeSuballocCount > 0)
    7987  {
    7989  {
    7990  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
    7991  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    7992  m_FreeSuballocationsBySize.data(),
    7993  m_FreeSuballocationsBySize.data() + freeSuballocCount,
    7994  allocSize + 2 * VMA_DEBUG_MARGIN,
    7995  VmaSuballocationItemSizeLess());
    7996  size_t index = it - m_FreeSuballocationsBySize.data();
    7997  for(; index < freeSuballocCount; ++index)
    7998  {
    7999  if(CheckAllocation(
    8000  currentFrameIndex,
    8001  frameInUseCount,
    8002  bufferImageGranularity,
    8003  allocSize,
    8004  allocAlignment,
    8005  allocType,
    8006  m_FreeSuballocationsBySize[index],
    8007  false, // canMakeOtherLost
    8008  &pAllocationRequest->offset,
    8009  &pAllocationRequest->itemsToMakeLostCount,
    8010  &pAllocationRequest->sumFreeSize,
    8011  &pAllocationRequest->sumItemSize))
    8012  {
    8013  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    8014  return true;
    8015  }
    8016  }
    8017  }
    8018  else if(strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
    8019  {
    8020  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    8021  it != m_Suballocations.end();
    8022  ++it)
    8023  {
    8024  if(it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
    8025  currentFrameIndex,
    8026  frameInUseCount,
    8027  bufferImageGranularity,
    8028  allocSize,
    8029  allocAlignment,
    8030  allocType,
    8031  it,
    8032  false, // canMakeOtherLost
    8033  &pAllocationRequest->offset,
    8034  &pAllocationRequest->itemsToMakeLostCount,
    8035  &pAllocationRequest->sumFreeSize,
    8036  &pAllocationRequest->sumItemSize))
    8037  {
    8038  pAllocationRequest->item = it;
    8039  return true;
    8040  }
    8041  }
    8042  }
    8043  else // WORST_FIT, FIRST_FIT
    8044  {
    8045  // Search staring from biggest suballocations.
    8046  for(size_t index = freeSuballocCount; index--; )
    8047  {
    8048  if(CheckAllocation(
    8049  currentFrameIndex,
    8050  frameInUseCount,
    8051  bufferImageGranularity,
    8052  allocSize,
    8053  allocAlignment,
    8054  allocType,
    8055  m_FreeSuballocationsBySize[index],
    8056  false, // canMakeOtherLost
    8057  &pAllocationRequest->offset,
    8058  &pAllocationRequest->itemsToMakeLostCount,
    8059  &pAllocationRequest->sumFreeSize,
    8060  &pAllocationRequest->sumItemSize))
    8061  {
    8062  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    8063  return true;
    8064  }
    8065  }
    8066  }
    8067  }
    8068 
    8069  if(canMakeOtherLost)
    8070  {
    8071  // Brute-force algorithm. TODO: Come up with something better.
    8072 
    8073  bool found = false;
    8074  VmaAllocationRequest tmpAllocRequest = {};
    8075  tmpAllocRequest.type = VmaAllocationRequestType::Normal;
    8076  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
    8077  suballocIt != m_Suballocations.end();
    8078  ++suballocIt)
    8079  {
    8080  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
    8081  suballocIt->hAllocation->CanBecomeLost())
    8082  {
    8083  if(CheckAllocation(
    8084  currentFrameIndex,
    8085  frameInUseCount,
    8086  bufferImageGranularity,
    8087  allocSize,
    8088  allocAlignment,
    8089  allocType,
    8090  suballocIt,
    8091  canMakeOtherLost,
    8092  &tmpAllocRequest.offset,
    8093  &tmpAllocRequest.itemsToMakeLostCount,
    8094  &tmpAllocRequest.sumFreeSize,
    8095  &tmpAllocRequest.sumItemSize))
    8096  {
    8098  {
    8099  *pAllocationRequest = tmpAllocRequest;
    8100  pAllocationRequest->item = suballocIt;
    8101  break;
    8102  }
    8103  if(!found || tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
    8104  {
    8105  *pAllocationRequest = tmpAllocRequest;
    8106  pAllocationRequest->item = suballocIt;
    8107  found = true;
    8108  }
    8109  }
    8110  }
    8111  }
    8112 
    8113  return found;
    8114  }
    8115 
    8116  return false;
    8117 }
    8118 
    8119 bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
    8120  uint32_t currentFrameIndex,
    8121  uint32_t frameInUseCount,
    8122  VmaAllocationRequest* pAllocationRequest)
    8123 {
    8124  VMA_ASSERT(pAllocationRequest && pAllocationRequest->type == VmaAllocationRequestType::Normal);
    8125 
    8126  while(pAllocationRequest->itemsToMakeLostCount > 0)
    8127  {
    8128  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
    8129  {
    8130  ++pAllocationRequest->item;
    8131  }
    8132  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    8133  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
    8134  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
    8135  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    8136  {
    8137  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
    8138  --pAllocationRequest->itemsToMakeLostCount;
    8139  }
    8140  else
    8141  {
    8142  return false;
    8143  }
    8144  }
    8145 
    8146  VMA_HEAVY_ASSERT(Validate());
    8147  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    8148  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
    8149 
    8150  return true;
    8151 }
    8152 
    8153 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    8154 {
    8155  uint32_t lostAllocationCount = 0;
    8156  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    8157  it != m_Suballocations.end();
    8158  ++it)
    8159  {
    8160  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
    8161  it->hAllocation->CanBecomeLost() &&
    8162  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    8163  {
    8164  it = FreeSuballocation(it);
    8165  ++lostAllocationCount;
    8166  }
    8167  }
    8168  return lostAllocationCount;
    8169 }
    8170 
    8171 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
    8172 {
    8173  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    8174  it != m_Suballocations.end();
    8175  ++it)
    8176  {
    8177  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
    8178  {
    8179  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
    8180  {
    8181  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    8182  return VK_ERROR_VALIDATION_FAILED_EXT;
    8183  }
    8184  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
    8185  {
    8186  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    8187  return VK_ERROR_VALIDATION_FAILED_EXT;
    8188  }
    8189  }
    8190  }
    8191 
    8192  return VK_SUCCESS;
    8193 }
    8194 
    8195 void VmaBlockMetadata_Generic::Alloc(
    8196  const VmaAllocationRequest& request,
    8197  VmaSuballocationType type,
    8198  VkDeviceSize allocSize,
    8199  VmaAllocation hAllocation)
    8200 {
    8201  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
    8202  VMA_ASSERT(request.item != m_Suballocations.end());
    8203  VmaSuballocation& suballoc = *request.item;
    8204  // Given suballocation is a free block.
    8205  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    8206  // Given offset is inside this suballocation.
    8207  VMA_ASSERT(request.offset >= suballoc.offset);
    8208  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    8209  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    8210  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
    8211 
    8212  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    8213  // it to become used.
    8214  UnregisterFreeSuballocation(request.item);
    8215 
    8216  suballoc.offset = request.offset;
    8217  suballoc.size = allocSize;
    8218  suballoc.type = type;
    8219  suballoc.hAllocation = hAllocation;
    8220 
    8221  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    8222  if(paddingEnd)
    8223  {
    8224  VmaSuballocation paddingSuballoc = {};
    8225  paddingSuballoc.offset = request.offset + allocSize;
    8226  paddingSuballoc.size = paddingEnd;
    8227  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8228  VmaSuballocationList::iterator next = request.item;
    8229  ++next;
    8230  const VmaSuballocationList::iterator paddingEndItem =
    8231  m_Suballocations.insert(next, paddingSuballoc);
    8232  RegisterFreeSuballocation(paddingEndItem);
    8233  }
    8234 
    8235  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    8236  if(paddingBegin)
    8237  {
    8238  VmaSuballocation paddingSuballoc = {};
    8239  paddingSuballoc.offset = request.offset - paddingBegin;
    8240  paddingSuballoc.size = paddingBegin;
    8241  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8242  const VmaSuballocationList::iterator paddingBeginItem =
    8243  m_Suballocations.insert(request.item, paddingSuballoc);
    8244  RegisterFreeSuballocation(paddingBeginItem);
    8245  }
    8246 
    8247  // Update totals.
    8248  m_FreeCount = m_FreeCount - 1;
    8249  if(paddingBegin > 0)
    8250  {
    8251  ++m_FreeCount;
    8252  }
    8253  if(paddingEnd > 0)
    8254  {
    8255  ++m_FreeCount;
    8256  }
    8257  m_SumFreeSize -= allocSize;
    8258 }
    8259 
    8260 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
    8261 {
    8262  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    8263  suballocItem != m_Suballocations.end();
    8264  ++suballocItem)
    8265  {
    8266  VmaSuballocation& suballoc = *suballocItem;
    8267  if(suballoc.hAllocation == allocation)
    8268  {
    8269  FreeSuballocation(suballocItem);
    8270  VMA_HEAVY_ASSERT(Validate());
    8271  return;
    8272  }
    8273  }
    8274  VMA_ASSERT(0 && "Not found!");
    8275 }
    8276 
    8277 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
    8278 {
    8279  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    8280  suballocItem != m_Suballocations.end();
    8281  ++suballocItem)
    8282  {
    8283  VmaSuballocation& suballoc = *suballocItem;
    8284  if(suballoc.offset == offset)
    8285  {
    8286  FreeSuballocation(suballocItem);
    8287  return;
    8288  }
    8289  }
    8290  VMA_ASSERT(0 && "Not found!");
    8291 }
    8292 
// Resizes the suballocation holding `alloc` in place to `newSize`.
// Shrinking always succeeds: the freed tail either grows a following free item
// backward or becomes a new free item. Growing succeeds only when the
// immediately following item is free and large enough (including
// VMA_DEBUG_MARGIN); otherwise returns false without modification.
// Asserts if `alloc` is not found in this block.
bool VmaBlockMetadata_Generic::ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize)
{
    typedef VmaSuballocationList::iterator iter_type;
    for(iter_type suballocItem = m_Suballocations.begin();
        suballocItem != m_Suballocations.end();
        ++suballocItem)
    {
        VmaSuballocation& suballoc = *suballocItem;
        if(suballoc.hAllocation == alloc)
        {
            iter_type nextItem = suballocItem;
            ++nextItem;

            // Should have been ensured on higher level.
            VMA_ASSERT(newSize != alloc->GetSize() && newSize > 0);

            // Shrinking.
            if(newSize < alloc->GetSize())
            {
                const VkDeviceSize sizeDiff = suballoc.size - newSize;

                // There is next item.
                if(nextItem != m_Suballocations.end())
                {
                    // Next item is free.
                    if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                    {
                        // Grow this next item backward.
                        // Unregister/register because changing its size changes its
                        // position in the size-sorted free list.
                        UnregisterFreeSuballocation(nextItem);
                        nextItem->offset -= sizeDiff;
                        nextItem->size += sizeDiff;
                        RegisterFreeSuballocation(nextItem);
                    }
                    // Next item is not free.
                    else
                    {
                        // Create free item after current one.
                        VmaSuballocation newFreeSuballoc;
                        newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
                        newFreeSuballoc.offset = suballoc.offset + newSize;
                        newFreeSuballoc.size = sizeDiff;
                        newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                        iter_type newFreeSuballocIt = m_Suballocations.insert(nextItem, newFreeSuballoc);
                        RegisterFreeSuballocation(newFreeSuballocIt);

                        ++m_FreeCount;
                    }
                }
                // This is the last item.
                else
                {
                    // Create free item at the end.
                    VmaSuballocation newFreeSuballoc;
                    newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
                    newFreeSuballoc.offset = suballoc.offset + newSize;
                    newFreeSuballoc.size = sizeDiff;
                    newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                    m_Suballocations.push_back(newFreeSuballoc);

                    iter_type newFreeSuballocIt = m_Suballocations.end();
                    RegisterFreeSuballocation(--newFreeSuballocIt);

                    ++m_FreeCount;
                }

                suballoc.size = newSize;
                m_SumFreeSize += sizeDiff;
            }
            // Growing.
            else
            {
                const VkDeviceSize sizeDiff = newSize - suballoc.size;

                // There is next item.
                if(nextItem != m_Suballocations.end())
                {
                    // Next item is free.
                    if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                    {
                        // There is not enough free space, including margin.
                        if(nextItem->size < sizeDiff + VMA_DEBUG_MARGIN)
                        {
                            return false;
                        }

                        // There is more free space than required.
                        if(nextItem->size > sizeDiff)
                        {
                            // Move and shrink this next item.
                            UnregisterFreeSuballocation(nextItem);
                            nextItem->offset += sizeDiff;
                            nextItem->size -= sizeDiff;
                            RegisterFreeSuballocation(nextItem);
                        }
                        // There is exactly the amount of free space required.
                        else
                        {
                            // Remove this next free item.
                            UnregisterFreeSuballocation(nextItem);
                            m_Suballocations.erase(nextItem);
                            --m_FreeCount;
                        }
                    }
                    // Next item is not free - there is no space to grow.
                    else
                    {
                        return false;
                    }
                }
                // This is the last item - there is no space to grow.
                else
                {
                    return false;
                }

                suballoc.size = newSize;
                m_SumFreeSize -= sizeDiff;
            }

            // We cannot call Validate() here because alloc object is updated to new size outside of this call.
            return true;
        }
    }
    VMA_ASSERT(0 && "Not found!");
    return false;
}
    8419 
    8420 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
    8421 {
    8422  VkDeviceSize lastSize = 0;
    8423  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
    8424  {
    8425  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
    8426 
    8427  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
    8428  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    8429  VMA_VALIDATE(it->size >= lastSize);
    8430  lastSize = it->size;
    8431  }
    8432  return true;
    8433 }
    8434 
// Checks whether an allocation of allocSize/allocAlignment/allocType can be
// placed starting inside the suballocation at suballocItem.
// On success fills *pOffset with the final, aligned offset.
// When canMakeOtherLost is true the candidate region may span several
// suballocations: allocations that can be made lost are counted in
// *itemsToMakeLostCount and their total size in *pSumItemSize, while free
// space encountered is accumulated in *pSumFreeSize. When canMakeOtherLost is
// false, suballocItem must itself be a free item large enough for the request.
bool VmaBlockMetadata_Generic::CheckAllocation(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    VmaSuballocationList::const_iterator suballocItem,
    bool canMakeOtherLost,
    VkDeviceSize* pOffset,
    size_t* itemsToMakeLostCount,
    VkDeviceSize* pSumFreeSize,
    VkDeviceSize* pSumItemSize) const
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(suballocItem != m_Suballocations.cend());
    VMA_ASSERT(pOffset != VMA_NULL);

    *itemsToMakeLostCount = 0;
    *pSumFreeSize = 0;
    *pSumItemSize = 0;

    if(canMakeOtherLost)
    {
        if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            *pSumFreeSize = suballocItem->size;
        }
        else
        {
            // A used starting item is acceptable only if it can be made lost
            // and has not been used within the last frameInUseCount frames.
            if(suballocItem->hAllocation->CanBecomeLost() &&
                suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
            {
                ++*itemsToMakeLostCount;
                *pSumItemSize = suballocItem->size;
            }
            else
            {
                return false;
            }
        }

        // Remaining size is too small for this request: Early return.
        if(GetSize() - suballocItem->offset < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballocItem->offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Now that we have final *pOffset, check if we are past suballocItem.
        // If yes, return false - this function should be called for another suballocItem as starting point.
        if(*pOffset >= suballocItem->offset + suballocItem->size)
        {
            return false;
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
        // Another early return check.
        if(suballocItem->offset + totalSize > GetSize())
        {
            return false;
        }

        // Advance lastSuballocItem until desired size is reached.
        // Update itemsToMakeLostCount.
        VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
        if(totalSize > suballocItem->size)
        {
            VkDeviceSize remainingSize = totalSize - suballocItem->size;
            while(remainingSize > 0)
            {
                ++lastSuballocItem;
                if(lastSuballocItem == m_Suballocations.cend())
                {
                    return false;
                }
                if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                {
                    *pSumFreeSize += lastSuballocItem->size;
                }
                else
                {
                    VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
                    if(lastSuballocItem->hAllocation->CanBecomeLost() &&
                        lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                    {
                        ++*itemsToMakeLostCount;
                        *pSumItemSize += lastSuballocItem->size;
                    }
                    else
                    {
                        return false;
                    }
                }
                remainingSize = (lastSuballocItem->size < remainingSize) ?
                    remainingSize - lastSuballocItem->size : 0;
            }
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, we must mark more allocations lost or fail.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
                        if(nextSuballoc.hAllocation->CanBecomeLost() &&
                            nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++*itemsToMakeLostCount;
                        }
                        else
                        {
                            return false;
                        }
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }
    else
    {
        // canMakeOtherLost == false: the request must fit entirely inside this
        // single free suballocation.
        const VmaSuballocation& suballoc = *suballocItem;
        VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        *pSumFreeSize = suballoc.size;

        // Size of this suballocation is too small for this request: Early return.
        if(suballoc.size < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballoc.offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        // Fail if requested size plus margin before and after is bigger than size of this suballocation.
        if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
        {
            return false;
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, allocation cannot be made here.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        return false;
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }

    // All tests passed: Success. pOffset is already filled.
    return true;
}
    8708 
    8709 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
    8710 {
    8711  VMA_ASSERT(item != m_Suballocations.end());
    8712  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    8713 
    8714  VmaSuballocationList::iterator nextItem = item;
    8715  ++nextItem;
    8716  VMA_ASSERT(nextItem != m_Suballocations.end());
    8717  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
    8718 
    8719  item->size += nextItem->size;
    8720  --m_FreeCount;
    8721  m_Suballocations.erase(nextItem);
    8722 }
    8723 
    8724 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
    8725 {
    8726  // Change this suballocation to be marked as free.
    8727  VmaSuballocation& suballoc = *suballocItem;
    8728  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8729  suballoc.hAllocation = VK_NULL_HANDLE;
    8730 
    8731  // Update totals.
    8732  ++m_FreeCount;
    8733  m_SumFreeSize += suballoc.size;
    8734 
    8735  // Merge with previous and/or next suballocation if it's also free.
    8736  bool mergeWithNext = false;
    8737  bool mergeWithPrev = false;
    8738 
    8739  VmaSuballocationList::iterator nextItem = suballocItem;
    8740  ++nextItem;
    8741  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    8742  {
    8743  mergeWithNext = true;
    8744  }
    8745 
    8746  VmaSuballocationList::iterator prevItem = suballocItem;
    8747  if(suballocItem != m_Suballocations.begin())
    8748  {
    8749  --prevItem;
    8750  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    8751  {
    8752  mergeWithPrev = true;
    8753  }
    8754  }
    8755 
    8756  if(mergeWithNext)
    8757  {
    8758  UnregisterFreeSuballocation(nextItem);
    8759  MergeFreeWithNext(suballocItem);
    8760  }
    8761 
    8762  if(mergeWithPrev)
    8763  {
    8764  UnregisterFreeSuballocation(prevItem);
    8765  MergeFreeWithNext(prevItem);
    8766  RegisterFreeSuballocation(prevItem);
    8767  return prevItem;
    8768  }
    8769  else
    8770  {
    8771  RegisterFreeSuballocation(suballocItem);
    8772  return suballocItem;
    8773  }
    8774 }
    8775 
    8776 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
    8777 {
    8778  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    8779  VMA_ASSERT(item->size > 0);
    8780 
    8781  // You may want to enable this validation at the beginning or at the end of
    8782  // this function, depending on what do you want to check.
    8783  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    8784 
    8785  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    8786  {
    8787  if(m_FreeSuballocationsBySize.empty())
    8788  {
    8789  m_FreeSuballocationsBySize.push_back(item);
    8790  }
    8791  else
    8792  {
    8793  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
    8794  }
    8795  }
    8796 
    8797  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    8798 }
    8799 
    8800 
    8801 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
    8802 {
    8803  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    8804  VMA_ASSERT(item->size > 0);
    8805 
    8806  // You may want to enable this validation at the beginning or at the end of
    8807  // this function, depending on what do you want to check.
    8808  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    8809 
    8810  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    8811  {
    8812  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    8813  m_FreeSuballocationsBySize.data(),
    8814  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
    8815  item,
    8816  VmaSuballocationItemSizeLess());
    8817  for(size_t index = it - m_FreeSuballocationsBySize.data();
    8818  index < m_FreeSuballocationsBySize.size();
    8819  ++index)
    8820  {
    8821  if(m_FreeSuballocationsBySize[index] == item)
    8822  {
    8823  VmaVectorRemove(m_FreeSuballocationsBySize, index);
    8824  return;
    8825  }
    8826  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
    8827  }
    8828  VMA_ASSERT(0 && "Not found.");
    8829  }
    8830 
    8831  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    8832 }
    8833 
    8834 bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible(
    8835  VkDeviceSize bufferImageGranularity,
    8836  VmaSuballocationType& inOutPrevSuballocType) const
    8837 {
    8838  if(bufferImageGranularity == 1 || IsEmpty())
    8839  {
    8840  return false;
    8841  }
    8842 
    8843  VkDeviceSize minAlignment = VK_WHOLE_SIZE;
    8844  bool typeConflictFound = false;
    8845  for(VmaSuballocationList::const_iterator it = m_Suballocations.cbegin();
    8846  it != m_Suballocations.cend();
    8847  ++it)
    8848  {
    8849  const VmaSuballocationType suballocType = it->type;
    8850  if(suballocType != VMA_SUBALLOCATION_TYPE_FREE)
    8851  {
    8852  minAlignment = VMA_MIN(minAlignment, it->hAllocation->GetAlignment());
    8853  if(VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType))
    8854  {
    8855  typeConflictFound = true;
    8856  }
    8857  inOutPrevSuballocType = suballocType;
    8858  }
    8859  }
    8860 
    8861  return typeConflictFound || minAlignment >= bufferImageGranularity;
    8862 }
    8863 
    8865 // class VmaBlockMetadata_Linear
    8866 
// Constructs empty linear metadata: both suballocation vectors use the
// allocator's callbacks, vector 0 is the "1st" vector, the second vector is
// empty, and all null-item counters start at zero.
VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_SumFreeSize(0),
    m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_1stVectorIndex(0),
    m_2ndVectorMode(SECOND_VECTOR_EMPTY),
    m_1stNullItemsBeginCount(0),
    m_1stNullItemsMiddleCount(0),
    m_2ndNullItemsCount(0)
{
}
    8879 
    8880 VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
    8881 {
    8882 }
    8883 
// Initializes metadata for a block of the given size.
// The base class records the size; initially the entire block is free.
void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);
    m_SumFreeSize = size;
}
    8889 
    8890 bool VmaBlockMetadata_Linear::Validate() const
    8891 {
    8892  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8893  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8894 
    8895  VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
    8896  VMA_VALIDATE(!suballocations1st.empty() ||
    8897  suballocations2nd.empty() ||
    8898  m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
    8899 
    8900  if(!suballocations1st.empty())
    8901  {
    8902  // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
    8903  VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
    8904  // Null item at the end should be just pop_back().
    8905  VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
    8906  }
    8907  if(!suballocations2nd.empty())
    8908  {
    8909  // Null item at the end should be just pop_back().
    8910  VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
    8911  }
    8912 
    8913  VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
    8914  VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
    8915 
    8916  VkDeviceSize sumUsedSize = 0;
    8917  const size_t suballoc1stCount = suballocations1st.size();
    8918  VkDeviceSize offset = VMA_DEBUG_MARGIN;
    8919 
    8920  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8921  {
    8922  const size_t suballoc2ndCount = suballocations2nd.size();
    8923  size_t nullItem2ndCount = 0;
    8924  for(size_t i = 0; i < suballoc2ndCount; ++i)
    8925  {
    8926  const VmaSuballocation& suballoc = suballocations2nd[i];
    8927  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    8928 
    8929  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
    8930  VMA_VALIDATE(suballoc.offset >= offset);
    8931 
    8932  if(!currFree)
    8933  {
    8934  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
    8935  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
    8936  sumUsedSize += suballoc.size;
    8937  }
    8938  else
    8939  {
    8940  ++nullItem2ndCount;
    8941  }
    8942 
    8943  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    8944  }
    8945 
    8946  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    8947  }
    8948 
    8949  for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
    8950  {
    8951  const VmaSuballocation& suballoc = suballocations1st[i];
    8952  VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
    8953  suballoc.hAllocation == VK_NULL_HANDLE);
    8954  }
    8955 
    8956  size_t nullItem1stCount = m_1stNullItemsBeginCount;
    8957 
    8958  for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
    8959  {
    8960  const VmaSuballocation& suballoc = suballocations1st[i];
    8961  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    8962 
    8963  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
    8964  VMA_VALIDATE(suballoc.offset >= offset);
    8965  VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
    8966 
    8967  if(!currFree)
    8968  {
    8969  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
    8970  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
    8971  sumUsedSize += suballoc.size;
    8972  }
    8973  else
    8974  {
    8975  ++nullItem1stCount;
    8976  }
    8977 
    8978  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    8979  }
    8980  VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
    8981 
    8982  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8983  {
    8984  const size_t suballoc2ndCount = suballocations2nd.size();
    8985  size_t nullItem2ndCount = 0;
    8986  for(size_t i = suballoc2ndCount; i--; )
    8987  {
    8988  const VmaSuballocation& suballoc = suballocations2nd[i];
    8989  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    8990 
    8991  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
    8992  VMA_VALIDATE(suballoc.offset >= offset);
    8993 
    8994  if(!currFree)
    8995  {
    8996  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
    8997  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
    8998  sumUsedSize += suballoc.size;
    8999  }
    9000  else
    9001  {
    9002  ++nullItem2ndCount;
    9003  }
    9004 
    9005  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    9006  }
    9007 
    9008  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    9009  }
    9010 
    9011  VMA_VALIDATE(offset <= GetSize());
    9012  VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
    9013 
    9014  return true;
    9015 }
    9016 
    9017 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
    9018 {
    9019  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
    9020  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
    9021 }
    9022 
/*
Returns the size of the largest contiguous free region that new allocations
could actually be placed into, depending on the current 2nd-vector mode.
*/
VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
{
    const VkDeviceSize size = GetSize();

    /*
    We don't consider gaps inside allocation vectors with freed allocations because
    they are not suitable for reuse in linear allocator. We consider only space that
    is available for new allocations.
    */
    if(IsEmpty())
    {
        return size;
    }

    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();

    switch(m_2ndVectorMode)
    {
    case SECOND_VECTOR_EMPTY:
        /*
        Available space is after end of 1st, as well as before beginning of 1st (which
        would make it a ring buffer).
        */
        {
            const size_t suballocations1stCount = suballocations1st.size();
            // Block is not empty, so 1st vector must contain at least one non-null item.
            VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
            // First live item and last item bound the occupied region of the 1st vector.
            const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
            const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
            // Larger of: gap before the first allocation, gap after the last one.
            return VMA_MAX(
                firstSuballoc.offset,
                size - (lastSuballoc.offset + lastSuballoc.size));
        }
        break;

    case SECOND_VECTOR_RING_BUFFER:
        /*
        Available space is only between end of 2nd and beginning of 1st.
        */
        {
            // NOTE(review): assumes 2nd vector is non-empty in ring-buffer mode — confirm invariant.
            const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
            const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
            const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
            return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
        }
        break;

    case SECOND_VECTOR_DOUBLE_STACK:
        /*
        Available space is only between end of 1st and top of 2nd.
        */
        {
            const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
            // 2nd vector grows downward from the end of the block; back() is its top (lowest offset).
            const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
            const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
            return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
        }
        break;

    default:
        VMA_ASSERT(0);
        return 0;
    }
}
    9086 
    9087 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    9088 {
    9089  const VkDeviceSize size = GetSize();
    9090  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9091  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9092  const size_t suballoc1stCount = suballocations1st.size();
    9093  const size_t suballoc2ndCount = suballocations2nd.size();
    9094 
    9095  outInfo.blockCount = 1;
    9096  outInfo.allocationCount = (uint32_t)GetAllocationCount();
    9097  outInfo.unusedRangeCount = 0;
    9098  outInfo.usedBytes = 0;
    9099  outInfo.allocationSizeMin = UINT64_MAX;
    9100  outInfo.allocationSizeMax = 0;
    9101  outInfo.unusedRangeSizeMin = UINT64_MAX;
    9102  outInfo.unusedRangeSizeMax = 0;
    9103 
    9104  VkDeviceSize lastOffset = 0;
    9105 
    9106  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    9107  {
    9108  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    9109  size_t nextAlloc2ndIndex = 0;
    9110  while(lastOffset < freeSpace2ndTo1stEnd)
    9111  {
    9112  // Find next non-null allocation or move nextAllocIndex to the end.
    9113  while(nextAlloc2ndIndex < suballoc2ndCount &&
    9114  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9115  {
    9116  ++nextAlloc2ndIndex;
    9117  }
    9118 
    9119  // Found non-null allocation.
    9120  if(nextAlloc2ndIndex < suballoc2ndCount)
    9121  {
    9122  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9123 
    9124  // 1. Process free space before this allocation.
    9125  if(lastOffset < suballoc.offset)
    9126  {
    9127  // There is free space from lastOffset to suballoc.offset.
    9128  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9129  ++outInfo.unusedRangeCount;
    9130  outInfo.unusedBytes += unusedRangeSize;
    9131  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9132  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9133  }
    9134 
    9135  // 2. Process this allocation.
    9136  // There is allocation with suballoc.offset, suballoc.size.
    9137  outInfo.usedBytes += suballoc.size;
    9138  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    9139  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    9140 
    9141  // 3. Prepare for next iteration.
    9142  lastOffset = suballoc.offset + suballoc.size;
    9143  ++nextAlloc2ndIndex;
    9144  }
    9145  // We are at the end.
    9146  else
    9147  {
    9148  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    9149  if(lastOffset < freeSpace2ndTo1stEnd)
    9150  {
    9151  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    9152  ++outInfo.unusedRangeCount;
    9153  outInfo.unusedBytes += unusedRangeSize;
    9154  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9155  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9156  }
    9157 
    9158  // End of loop.
    9159  lastOffset = freeSpace2ndTo1stEnd;
    9160  }
    9161  }
    9162  }
    9163 
    9164  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    9165  const VkDeviceSize freeSpace1stTo2ndEnd =
    9166  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    9167  while(lastOffset < freeSpace1stTo2ndEnd)
    9168  {
    9169  // Find next non-null allocation or move nextAllocIndex to the end.
    9170  while(nextAlloc1stIndex < suballoc1stCount &&
    9171  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    9172  {
    9173  ++nextAlloc1stIndex;
    9174  }
    9175 
    9176  // Found non-null allocation.
    9177  if(nextAlloc1stIndex < suballoc1stCount)
    9178  {
    9179  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    9180 
    9181  // 1. Process free space before this allocation.
    9182  if(lastOffset < suballoc.offset)
    9183  {
    9184  // There is free space from lastOffset to suballoc.offset.
    9185  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9186  ++outInfo.unusedRangeCount;
    9187  outInfo.unusedBytes += unusedRangeSize;
    9188  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9189  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9190  }
    9191 
    9192  // 2. Process this allocation.
    9193  // There is allocation with suballoc.offset, suballoc.size.
    9194  outInfo.usedBytes += suballoc.size;
    9195  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    9196  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    9197 
    9198  // 3. Prepare for next iteration.
    9199  lastOffset = suballoc.offset + suballoc.size;
    9200  ++nextAlloc1stIndex;
    9201  }
    9202  // We are at the end.
    9203  else
    9204  {
    9205  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    9206  if(lastOffset < freeSpace1stTo2ndEnd)
    9207  {
    9208  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    9209  ++outInfo.unusedRangeCount;
    9210  outInfo.unusedBytes += unusedRangeSize;
    9211  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9212  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9213  }
    9214 
    9215  // End of loop.
    9216  lastOffset = freeSpace1stTo2ndEnd;
    9217  }
    9218  }
    9219 
    9220  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    9221  {
    9222  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    9223  while(lastOffset < size)
    9224  {
    9225  // Find next non-null allocation or move nextAllocIndex to the end.
    9226  while(nextAlloc2ndIndex != SIZE_MAX &&
    9227  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9228  {
    9229  --nextAlloc2ndIndex;
    9230  }
    9231 
    9232  // Found non-null allocation.
    9233  if(nextAlloc2ndIndex != SIZE_MAX)
    9234  {
    9235  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9236 
    9237  // 1. Process free space before this allocation.
    9238  if(lastOffset < suballoc.offset)
    9239  {
    9240  // There is free space from lastOffset to suballoc.offset.
    9241  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9242  ++outInfo.unusedRangeCount;
    9243  outInfo.unusedBytes += unusedRangeSize;
    9244  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9245  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9246  }
    9247 
    9248  // 2. Process this allocation.
    9249  // There is allocation with suballoc.offset, suballoc.size.
    9250  outInfo.usedBytes += suballoc.size;
    9251  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    9252  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    9253 
    9254  // 3. Prepare for next iteration.
    9255  lastOffset = suballoc.offset + suballoc.size;
    9256  --nextAlloc2ndIndex;
    9257  }
    9258  // We are at the end.
    9259  else
    9260  {
    9261  // There is free space from lastOffset to size.
    9262  if(lastOffset < size)
    9263  {
    9264  const VkDeviceSize unusedRangeSize = size - lastOffset;
    9265  ++outInfo.unusedRangeCount;
    9266  outInfo.unusedBytes += unusedRangeSize;
    9267  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9268  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9269  }
    9270 
    9271  // End of loop.
    9272  lastOffset = size;
    9273  }
    9274  }
    9275  }
    9276 
    9277  outInfo.unusedBytes = size - outInfo.usedBytes;
    9278 }
    9279 
    9280 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
    9281 {
    9282  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9283  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9284  const VkDeviceSize size = GetSize();
    9285  const size_t suballoc1stCount = suballocations1st.size();
    9286  const size_t suballoc2ndCount = suballocations2nd.size();
    9287 
    9288  inoutStats.size += size;
    9289 
    9290  VkDeviceSize lastOffset = 0;
    9291 
    9292  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    9293  {
    9294  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    9295  size_t nextAlloc2ndIndex = m_1stNullItemsBeginCount;
    9296  while(lastOffset < freeSpace2ndTo1stEnd)
    9297  {
    9298  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9299  while(nextAlloc2ndIndex < suballoc2ndCount &&
    9300  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9301  {
    9302  ++nextAlloc2ndIndex;
    9303  }
    9304 
    9305  // Found non-null allocation.
    9306  if(nextAlloc2ndIndex < suballoc2ndCount)
    9307  {
    9308  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9309 
    9310  // 1. Process free space before this allocation.
    9311  if(lastOffset < suballoc.offset)
    9312  {
    9313  // There is free space from lastOffset to suballoc.offset.
    9314  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9315  inoutStats.unusedSize += unusedRangeSize;
    9316  ++inoutStats.unusedRangeCount;
    9317  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9318  }
    9319 
    9320  // 2. Process this allocation.
    9321  // There is allocation with suballoc.offset, suballoc.size.
    9322  ++inoutStats.allocationCount;
    9323 
    9324  // 3. Prepare for next iteration.
    9325  lastOffset = suballoc.offset + suballoc.size;
    9326  ++nextAlloc2ndIndex;
    9327  }
    9328  // We are at the end.
    9329  else
    9330  {
    9331  if(lastOffset < freeSpace2ndTo1stEnd)
    9332  {
    9333  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    9334  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    9335  inoutStats.unusedSize += unusedRangeSize;
    9336  ++inoutStats.unusedRangeCount;
    9337  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9338  }
    9339 
    9340  // End of loop.
    9341  lastOffset = freeSpace2ndTo1stEnd;
    9342  }
    9343  }
    9344  }
    9345 
    9346  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    9347  const VkDeviceSize freeSpace1stTo2ndEnd =
    9348  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    9349  while(lastOffset < freeSpace1stTo2ndEnd)
    9350  {
    9351  // Find next non-null allocation or move nextAllocIndex to the end.
    9352  while(nextAlloc1stIndex < suballoc1stCount &&
    9353  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    9354  {
    9355  ++nextAlloc1stIndex;
    9356  }
    9357 
    9358  // Found non-null allocation.
    9359  if(nextAlloc1stIndex < suballoc1stCount)
    9360  {
    9361  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    9362 
    9363  // 1. Process free space before this allocation.
    9364  if(lastOffset < suballoc.offset)
    9365  {
    9366  // There is free space from lastOffset to suballoc.offset.
    9367  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9368  inoutStats.unusedSize += unusedRangeSize;
    9369  ++inoutStats.unusedRangeCount;
    9370  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9371  }
    9372 
    9373  // 2. Process this allocation.
    9374  // There is allocation with suballoc.offset, suballoc.size.
    9375  ++inoutStats.allocationCount;
    9376 
    9377  // 3. Prepare for next iteration.
    9378  lastOffset = suballoc.offset + suballoc.size;
    9379  ++nextAlloc1stIndex;
    9380  }
    9381  // We are at the end.
    9382  else
    9383  {
    9384  if(lastOffset < freeSpace1stTo2ndEnd)
    9385  {
    9386  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    9387  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    9388  inoutStats.unusedSize += unusedRangeSize;
    9389  ++inoutStats.unusedRangeCount;
    9390  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9391  }
    9392 
    9393  // End of loop.
    9394  lastOffset = freeSpace1stTo2ndEnd;
    9395  }
    9396  }
    9397 
    9398  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    9399  {
    9400  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    9401  while(lastOffset < size)
    9402  {
    9403  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9404  while(nextAlloc2ndIndex != SIZE_MAX &&
    9405  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9406  {
    9407  --nextAlloc2ndIndex;
    9408  }
    9409 
    9410  // Found non-null allocation.
    9411  if(nextAlloc2ndIndex != SIZE_MAX)
    9412  {
    9413  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9414 
    9415  // 1. Process free space before this allocation.
    9416  if(lastOffset < suballoc.offset)
    9417  {
    9418  // There is free space from lastOffset to suballoc.offset.
    9419  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9420  inoutStats.unusedSize += unusedRangeSize;
    9421  ++inoutStats.unusedRangeCount;
    9422  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9423  }
    9424 
    9425  // 2. Process this allocation.
    9426  // There is allocation with suballoc.offset, suballoc.size.
    9427  ++inoutStats.allocationCount;
    9428 
    9429  // 3. Prepare for next iteration.
    9430  lastOffset = suballoc.offset + suballoc.size;
    9431  --nextAlloc2ndIndex;
    9432  }
    9433  // We are at the end.
    9434  else
    9435  {
    9436  if(lastOffset < size)
    9437  {
    9438  // There is free space from lastOffset to size.
    9439  const VkDeviceSize unusedRangeSize = size - lastOffset;
    9440  inoutStats.unusedSize += unusedRangeSize;
    9441  ++inoutStats.unusedRangeCount;
    9442  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9443  }
    9444 
    9445  // End of loop.
    9446  lastOffset = size;
    9447  }
    9448  }
    9449  }
    9450 }
    9451 
    9452 #if VMA_STATS_STRING_ENABLED
/*
Writes a detailed JSON map of this block to the given writer.

Two passes over the same address-space walk (2nd vector ring-buffer part,
then 1st vector, then 2nd vector double-stack part, skipping null items):
- FIRST PASS only counts allocations, used bytes and unused ranges, because
  PrintDetailedMap_Begin needs those totals up front.
- SECOND PASS emits one JSON entry per allocation / unused range.
Both passes must visit exactly the same ranges for the counts to match the
emitted entries.
*/
void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
{
    const VkDeviceSize size = GetSize();
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    const size_t suballoc1stCount = suballocations1st.size();
    const size_t suballoc2ndCount = suballocations2nd.size();

    // FIRST PASS

    size_t unusedRangeCount = 0;
    VkDeviceSize usedBytes = 0;

    VkDeviceSize lastOffset = 0;

    size_t alloc2ndCount = 0;
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        // 2nd vector occupies [0, first live item of 1st vector).
        const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
        size_t nextAlloc2ndIndex = 0;
        while(lastOffset < freeSpace2ndTo1stEnd)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while(nextAlloc2ndIndex < suballoc2ndCount &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                ++nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if(nextAlloc2ndIndex < suballoc2ndCount)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if(lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    ++unusedRangeCount;
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                ++alloc2ndCount;
                usedBytes += suballoc.size;

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                ++nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if(lastOffset < freeSpace2ndTo1stEnd)
                {
                    // There is free space from lastOffset to freeSpace2ndTo1stEnd.
                    ++unusedRangeCount;
                }

                // End of loop.
                lastOffset = freeSpace2ndTo1stEnd;
            }
        }
    }

    size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    size_t alloc1stCount = 0;
    // 1st vector is bounded above by top of the 2nd stack (double-stack mode)
    // or by the end of the block.
    const VkDeviceSize freeSpace1stTo2ndEnd =
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    while(lastOffset < freeSpace1stTo2ndEnd)
    {
        // Find next non-null allocation or move nextAllocIndex to the end.
        while(nextAlloc1stIndex < suballoc1stCount &&
            suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
        {
            ++nextAlloc1stIndex;
        }

        // Found non-null allocation.
        if(nextAlloc1stIndex < suballoc1stCount)
        {
            const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];

            // 1. Process free space before this allocation.
            if(lastOffset < suballoc.offset)
            {
                // There is free space from lastOffset to suballoc.offset.
                ++unusedRangeCount;
            }

            // 2. Process this allocation.
            // There is allocation with suballoc.offset, suballoc.size.
            ++alloc1stCount;
            usedBytes += suballoc.size;

            // 3. Prepare for next iteration.
            lastOffset = suballoc.offset + suballoc.size;
            ++nextAlloc1stIndex;
        }
        // We are at the end.
        else
        {
            // NOTE(review): second pass uses `lastOffset < freeSpace1stTo2ndEnd` here.
            // Equivalent in effect, since the loop condition already guarantees
            // lastOffset < freeSpace1stTo2ndEnd <= size, but worth unifying.
            if(lastOffset < size)
            {
                // There is free space from lastOffset to freeSpace1stTo2ndEnd.
                ++unusedRangeCount;
            }

            // End of loop.
            lastOffset = freeSpace1stTo2ndEnd;
        }
    }

    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        // 2nd vector is stored top-down; iterate backwards for increasing offsets.
        size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
        while(lastOffset < size)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while(nextAlloc2ndIndex != SIZE_MAX &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                --nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if(nextAlloc2ndIndex != SIZE_MAX)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if(lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    ++unusedRangeCount;
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                ++alloc2ndCount;
                usedBytes += suballoc.size;

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                --nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if(lastOffset < size)
                {
                    // There is free space from lastOffset to size.
                    ++unusedRangeCount;
                }

                // End of loop.
                lastOffset = size;
            }
        }
    }

    const VkDeviceSize unusedBytes = size - usedBytes;
    PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);

    // SECOND PASS
    lastOffset = 0;

    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
        size_t nextAlloc2ndIndex = 0;
        while(lastOffset < freeSpace2ndTo1stEnd)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while(nextAlloc2ndIndex < suballoc2ndCount &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                ++nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if(nextAlloc2ndIndex < suballoc2ndCount)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if(lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                ++nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if(lastOffset < freeSpace2ndTo1stEnd)
                {
                    // There is free space from lastOffset to freeSpace2ndTo1stEnd.
                    const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }

                // End of loop.
                lastOffset = freeSpace2ndTo1stEnd;
            }
        }
    }

    // Reuses nextAlloc1stIndex and freeSpace1stTo2ndEnd from the first pass.
    nextAlloc1stIndex = m_1stNullItemsBeginCount;
    while(lastOffset < freeSpace1stTo2ndEnd)
    {
        // Find next non-null allocation or move nextAllocIndex to the end.
        while(nextAlloc1stIndex < suballoc1stCount &&
            suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
        {
            ++nextAlloc1stIndex;
        }

        // Found non-null allocation.
        if(nextAlloc1stIndex < suballoc1stCount)
        {
            const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];

            // 1. Process free space before this allocation.
            if(lastOffset < suballoc.offset)
            {
                // There is free space from lastOffset to suballoc.offset.
                const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
            }

            // 2. Process this allocation.
            // There is allocation with suballoc.offset, suballoc.size.
            PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);

            // 3. Prepare for next iteration.
            lastOffset = suballoc.offset + suballoc.size;
            ++nextAlloc1stIndex;
        }
        // We are at the end.
        else
        {
            if(lastOffset < freeSpace1stTo2ndEnd)
            {
                // There is free space from lastOffset to freeSpace1stTo2ndEnd.
                const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
                PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
            }

            // End of loop.
            lastOffset = freeSpace1stTo2ndEnd;
        }
    }

    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
        while(lastOffset < size)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while(nextAlloc2ndIndex != SIZE_MAX &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                --nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if(nextAlloc2ndIndex != SIZE_MAX)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if(lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                --nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if(lastOffset < size)
                {
                    // There is free space from lastOffset to size.
                    const VkDeviceSize unusedRangeSize = size - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }

                // End of loop.
                lastOffset = size;
            }
        }
    }

    PrintDetailedMap_End(json);
}
    9767 #endif // #if VMA_STATS_STRING_ENABLED
    9768 
    9769 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
    9770  uint32_t currentFrameIndex,
    9771  uint32_t frameInUseCount,
    9772  VkDeviceSize bufferImageGranularity,
    9773  VkDeviceSize allocSize,
    9774  VkDeviceSize allocAlignment,
    9775  bool upperAddress,
    9776  VmaSuballocationType allocType,
    9777  bool canMakeOtherLost,
    9778  uint32_t strategy,
    9779  VmaAllocationRequest* pAllocationRequest)
    9780 {
    9781  VMA_ASSERT(allocSize > 0);
    9782  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    9783  VMA_ASSERT(pAllocationRequest != VMA_NULL);
    9784  VMA_HEAVY_ASSERT(Validate());
    9785  return upperAddress ?
    9786  CreateAllocationRequest_UpperAddress(
    9787  currentFrameIndex, frameInUseCount, bufferImageGranularity,
    9788  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest) :
    9789  CreateAllocationRequest_LowerAddress(
    9790  currentFrameIndex, frameInUseCount, bufferImageGranularity,
    9791  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest);
    9792 }
    9793 
/*
Tries to place a new allocation at the upper end of the block: the 2nd
suballocation vector used as a stack that grows downward from the end of the
block (double-stack mode). Offsets are therefore computed downward, with
VMA_DEBUG_MARGIN and alignment applied by subtraction / VmaAlignDown.
Returns true and fills *pAllocationRequest on success.
NOTE(review): currentFrameIndex, frameInUseCount, canMakeOtherLost and
strategy are accepted for signature symmetry with the lower-address path but
are not used here — this path never makes other allocations lost.
*/
bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    const VkDeviceSize size = GetSize();
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    // Double-stack usage is mutually exclusive with ring-buffer usage of the same pool.
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
        return false;
    }

    // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
    if(allocSize > size)
    {
        return false;
    }
    VkDeviceSize resultBaseOffset = size - allocSize;
    if(!suballocations2nd.empty())
    {
        const VmaSuballocation& lastSuballoc = suballocations2nd.back();
        resultBaseOffset = lastSuballoc.offset - allocSize;
        // Not enough space below the current top of the upper stack.
        if(allocSize > lastSuballoc.offset)
        {
            return false;
        }
    }

    // Start from offset equal to end of free space.
    VkDeviceSize resultOffset = resultBaseOffset;

    // Apply VMA_DEBUG_MARGIN at the end (subtracted, since we grow downward).
    if(VMA_DEBUG_MARGIN > 0)
    {
        if(resultOffset < VMA_DEBUG_MARGIN)
        {
            return false;
        }
        resultOffset -= VMA_DEBUG_MARGIN;
    }

    // Apply alignment: round DOWN because the allocation grows toward lower addresses.
    resultOffset = VmaAlignDown(resultOffset, allocAlignment);

    // Check next suballocations from 2nd for BufferImageGranularity conflicts.
    // Make bigger alignment if necessary.
    if(bufferImageGranularity > 1 && !suballocations2nd.empty())
    {
        bool bufferImageGranularityConflict = false;
        // Walk 2nd from back() (the neighbor closest to the candidate offset) toward front().
        for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
        {
            const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
            if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
            {
                if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
                {
                    bufferImageGranularityConflict = true;
                    break;
                }
            }
            else
                // Already on previous page — farther neighbors cannot conflict either.
                break;
        }
        if(bufferImageGranularityConflict)
        {
            // Align down further to a whole granularity page to resolve the conflict.
            resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
        }
    }

    // There is enough free space: the gap between the end of the 1st vector
    // (lower stack) and the chosen offset must cover the debug margin.
    const VkDeviceSize endOf1st = !suballocations1st.empty() ?
        suballocations1st.back().offset + suballocations1st.back().size :
        0;
    if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
    {
        // Check previous suballocations for BufferImageGranularity conflicts.
        // If conflict exists, allocation cannot be made here.
        if(bufferImageGranularity > 1)
        {
            for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
            {
                const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
                    {
                        return false;
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
            }
        }

        // All tests passed: Success.
        pAllocationRequest->offset = resultOffset;
        // Free size reported is the whole span from end of 1st to the top of this allocation.
        pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
        pAllocationRequest->sumItemSize = 0;
        // pAllocationRequest->item unused.
        pAllocationRequest->itemsToMakeLostCount = 0;
        pAllocationRequest->type = VmaAllocationRequestType::UpperAddress;
        return true;
    }

    return false;
}
    9913 
/*
Tries to place a new allocation growing upward from lower addresses.
Two strategies, tried in order:
1. At the end of the 1st vector (plain stack), limited by the bottom of the
   upper stack (2nd vector) in double-stack mode, or by block size otherwise.
2. Wrap-around: at the end of the 2nd vector used as a ring buffer, limited
   by the first live allocation of the 1st vector. In this mode, with
   canMakeOtherLost, colliding allocations from the beginning of 1st may be
   counted as "to make lost" to create room.
Returns true and fills *pAllocationRequest on success.
*/
bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    const VkDeviceSize size = GetSize();
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        // Try to allocate at the end of 1st vector.

        VkDeviceSize resultBaseOffset = 0;
        if(!suballocations1st.empty())
        {
            const VmaSuballocation& lastSuballoc = suballocations1st.back();
            resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
        }

        // Start from offset equal to beginning of free space.
        VkDeviceSize resultOffset = resultBaseOffset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            resultOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment: round UP, since we grow toward higher addresses.
        resultOffset = VmaAlignUp(resultOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1 && !suballocations1st.empty())
        {
            bool bufferImageGranularityConflict = false;
            // Walk 1st from back() (nearest neighbor below the candidate offset) toward front().
            for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
            {
                const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page — no farther neighbor can conflict.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                // Bump to the next granularity page to resolve the conflict.
                resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
            }
        }

        // Free space ends at the bottom of the upper stack in double-stack mode,
        // otherwise at the end of the block.
        const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
            suballocations2nd.back().offset : size;

        // There is enough free space at the end after alignment.
        if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
        {
            // Check next suballocations for BufferImageGranularity conflicts.
            // If conflict exists, allocation cannot be made here.
            if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
            {
                for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
                {
                    const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
                    if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                        {
                            return false;
                        }
                    }
                    else
                    {
                        // Already on previous page.
                        break;
                    }
                }
            }

            // All tests passed: Success.
            pAllocationRequest->offset = resultOffset;
            pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
            pAllocationRequest->sumItemSize = 0;
            // pAllocationRequest->item, customData unused.
            pAllocationRequest->type = VmaAllocationRequestType::EndOf1st;
            pAllocationRequest->itemsToMakeLostCount = 0;
            return true;
        }
    }

    // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
    // beginning of 1st vector as the end of free space.
    if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        // Ring-buffer wrap-around only makes sense when 1st has content to wrap before.
        VMA_ASSERT(!suballocations1st.empty());

        VkDeviceSize resultBaseOffset = 0;
        if(!suballocations2nd.empty())
        {
            const VmaSuballocation& lastSuballoc = suballocations2nd.back();
            resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
        }

        // Start from offset equal to beginning of free space.
        VkDeviceSize resultOffset = resultBaseOffset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            resultOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        resultOffset = VmaAlignUp(resultOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1 && !suballocations2nd.empty())
        {
            bool bufferImageGranularityConflict = false;
            for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
            {
                const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
            }
        }

        pAllocationRequest->itemsToMakeLostCount = 0;
        pAllocationRequest->sumItemSize = 0;
        // index1st points at the first live (non-null-at-beginning) item of 1st.
        size_t index1st = m_1stNullItemsBeginCount;

        if(canMakeOtherLost)
        {
            // Count how many leading allocations of 1st collide with the new
            // allocation's range and whether each of them can be made lost.
            while(index1st < suballocations1st.size() &&
                resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
            {
                // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
                const VmaSuballocation& suballoc = suballocations1st[index1st];
                if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
                {
                    // No problem.
                }
                else
                {
                    VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
                    if(suballoc.hAllocation->CanBecomeLost() &&
                        suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                    {
                        ++pAllocationRequest->itemsToMakeLostCount;
                        pAllocationRequest->sumItemSize += suballoc.size;
                    }
                    else
                    {
                        // A colliding allocation cannot be made lost — request fails.
                        return false;
                    }
                }
                ++index1st;
            }

            // Check next suballocations for BufferImageGranularity conflicts.
            // If conflict exists, we must mark more allocations lost or fail.
            if(bufferImageGranularity > 1)
            {
                while(index1st < suballocations1st.size())
                {
                    const VmaSuballocation& suballoc = suballocations1st[index1st];
                    if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
                    {
                        if(suballoc.hAllocation != VK_NULL_HANDLE)
                        {
                            // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
                            // Conservative: any allocation on the same page is made lost.
                            if(suballoc.hAllocation->CanBecomeLost() &&
                                suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                            {
                                ++pAllocationRequest->itemsToMakeLostCount;
                                pAllocationRequest->sumItemSize += suballoc.size;
                            }
                            else
                            {
                                return false;
                            }
                        }
                    }
                    else
                    {
                        // Already on next page.
                        break;
                    }
                    ++index1st;
                }
            }

            // Special case: There is not enough room at the end for this allocation, even after making all from the 1st lost.
            if(index1st == suballocations1st.size() &&
                resultOffset + allocSize + VMA_DEBUG_MARGIN > size)
            {
                // TODO: This is a known bug that it's not yet implemented and the allocation is failing.
                VMA_DEBUG_LOG("Unsupported special case in custom pool with linear allocation algorithm used as ring buffer with allocations that can be lost.");
            }
        }

        // There is enough free space at the end after alignment:
        // either the whole 1st would be consumed (limit = block size), or the
        // allocation ends before the next surviving item of 1st.
        if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= size) ||
            (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
        {
            // Check next suballocations for BufferImageGranularity conflicts.
            // If conflict exists, allocation cannot be made here.
            if(bufferImageGranularity > 1)
            {
                for(size_t nextSuballocIndex = index1st;
                    nextSuballocIndex < suballocations1st.size();
                    nextSuballocIndex++)
                {
                    const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
                    if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                        {
                            return false;
                        }
                    }
                    else
                    {
                        // Already on next page.
                        break;
                    }
                }
            }

            // All tests passed: Success.
            pAllocationRequest->offset = resultOffset;
            // Free size = span up to the next surviving 1st item (or block end),
            // minus what will be freed by making items lost.
            pAllocationRequest->sumFreeSize =
                (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
                - resultBaseOffset
                - pAllocationRequest->sumItemSize;
            pAllocationRequest->type = VmaAllocationRequestType::EndOf2nd;
            // pAllocationRequest->item, customData unused.
            return true;
        }
    }

    return false;
}
    10184 
/*
Executes the "make lost" part of a previously created allocation request:
walks live suballocations starting at the beginning of 1st (wrapping into 2nd
in ring-buffer mode) and marks allocations lost until
pAllocationRequest->itemsToMakeLostCount of them have been freed.
Returns true on success; false if some allocation could not be made lost
(in that case the metadata may already be partially modified).
*/
bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    if(pAllocationRequest->itemsToMakeLostCount == 0)
    {
        return true;
    }

    // Making allocations lost is only used by the ring-buffer flavor of the
    // linear algorithm, never by the double stack.
    VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);

    // We always start from 1st.
    SuballocationVectorType* suballocations = &AccessSuballocations1st();
    size_t index = m_1stNullItemsBeginCount;
    size_t madeLostCount = 0;
    while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
    {
        if(index == suballocations->size())
        {
            index = 0;
            // If we reached the end of 1st, wrap around: to the beginning of 2nd
            // in ring-buffer mode, or back to the beginning of 1st otherwise.
            if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
            {
                suballocations = &AccessSuballocations2nd();
            }
            // else: m_2ndVectorMode == SECOND_VECTOR_EMPTY:
            // suballocations continues pointing at AccessSuballocations1st().
            VMA_ASSERT(!suballocations->empty());
        }
        VmaSuballocation& suballoc = (*suballocations)[index];
        if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
            VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
            if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
            {
                // Turn the item into a free hole and track it in the proper null-item counter.
                suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                suballoc.hAllocation = VK_NULL_HANDLE;
                m_SumFreeSize += suballoc.size;
                if(suballocations == &AccessSuballocations1st())
                {
                    ++m_1stNullItemsMiddleCount;
                }
                else
                {
                    ++m_2ndNullItemsCount;
                }
                ++madeLostCount;
            }
            else
            {
                return false;
            }
        }
        ++index;
    }

    CleanupAfterFree();
    //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().

    return true;
}
    10248 
    10249 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    10250 {
    10251  uint32_t lostAllocationCount = 0;
    10252 
    10253  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    10254  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    10255  {
    10256  VmaSuballocation& suballoc = suballocations1st[i];
    10257  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    10258  suballoc.hAllocation->CanBecomeLost() &&
    10259  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    10260  {
    10261  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    10262  suballoc.hAllocation = VK_NULL_HANDLE;
    10263  ++m_1stNullItemsMiddleCount;
    10264  m_SumFreeSize += suballoc.size;
    10265  ++lostAllocationCount;
    10266  }
    10267  }
    10268 
    10269  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    10270  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    10271  {
    10272  VmaSuballocation& suballoc = suballocations2nd[i];
    10273  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    10274  suballoc.hAllocation->CanBecomeLost() &&
    10275  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    10276  {
    10277  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    10278  suballoc.hAllocation = VK_NULL_HANDLE;
    10279  ++m_2ndNullItemsCount;
    10280  m_SumFreeSize += suballoc.size;
    10281  ++lostAllocationCount;
    10282  }
    10283  }
    10284 
    10285  if(lostAllocationCount)
    10286  {
    10287  CleanupAfterFree();
    10288  }
    10289 
    10290  return lostAllocationCount;
    10291 }
    10292 
    10293 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
    10294 {
    10295  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    10296  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    10297  {
    10298  const VmaSuballocation& suballoc = suballocations1st[i];
    10299  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    10300  {
    10301  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    10302  {
    10303  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    10304  return VK_ERROR_VALIDATION_FAILED_EXT;
    10305  }
    10306  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    10307  {
    10308  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    10309  return VK_ERROR_VALIDATION_FAILED_EXT;
    10310  }
    10311  }
    10312  }
    10313 
    10314  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    10315  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    10316  {
    10317  const VmaSuballocation& suballoc = suballocations2nd[i];
    10318  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    10319  {
    10320  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    10321  {
    10322  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    10323  return VK_ERROR_VALIDATION_FAILED_EXT;
    10324  }
    10325  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    10326  {
    10327  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    10328  return VK_ERROR_VALIDATION_FAILED_EXT;
    10329  }
    10330  }
    10331  }
    10332 
    10333  return VK_SUCCESS;
    10334 }
    10335 
/*
Commits a previously validated allocation request into this block's metadata.
Appends the new suballocation to the vector indicated by request.type and
performs the corresponding mode transition of the 2nd vector:
- UpperAddress: push onto 2nd, switching it into double-stack mode.
- EndOf1st:     push onto 1st (plain stack growth).
- EndOf2nd:     push onto 2nd as a ring buffer, starting ring-buffer mode on
                the first such allocation.
Also decreases m_SumFreeSize by the allocated size.
*/
void VmaBlockMetadata_Linear::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    VmaAllocation hAllocation)
{
    const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };

    switch(request.type)
    {
    case VmaAllocationRequestType::UpperAddress:
        {
            VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
                "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
            SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
            suballocations2nd.push_back(newSuballoc);
            m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
        }
        break;
    case VmaAllocationRequestType::EndOf1st:
        {
            SuballocationVectorType& suballocations1st = AccessSuballocations1st();

            // Must not overlap the previous top of the 1st stack.
            VMA_ASSERT(suballocations1st.empty() ||
                request.offset >= suballocations1st.back().offset + suballocations1st.back().size);
            // Check if it fits before the end of the block.
            VMA_ASSERT(request.offset + allocSize <= GetSize());

            suballocations1st.push_back(newSuballoc);
        }
        break;
    case VmaAllocationRequestType::EndOf2nd:
        {
            SuballocationVectorType& suballocations1st = AccessSuballocations1st();
            // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
            VMA_ASSERT(!suballocations1st.empty() &&
                request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset);
            SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

            switch(m_2ndVectorMode)
            {
            case SECOND_VECTOR_EMPTY:
                // First allocation from second part ring buffer.
                VMA_ASSERT(suballocations2nd.empty());
                m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
                break;
            case SECOND_VECTOR_RING_BUFFER:
                // 2-part ring buffer is already started.
                VMA_ASSERT(!suballocations2nd.empty());
                break;
            case SECOND_VECTOR_DOUBLE_STACK:
                VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
                break;
            default:
                VMA_ASSERT(0);
            }

            suballocations2nd.push_back(newSuballoc);
        }
        break;
    default:
        VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
    }

    // Account the allocated bytes.
    m_SumFreeSize -= newSuballoc.size;
}
    10402 
    10403 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
    10404 {
    10405  FreeAtOffset(allocation->GetOffset());
    10406 }
    10407 
/*
Frees the suballocation that starts at the given offset. Tries cheap
fast paths first (oldest item of 1st, newest item of 2nd or 1st), then
falls back to binary search in the middle of 1st and finally of 2nd.
Every successful path updates m_SumFreeSize / null-item counters and ends
with CleanupAfterFree() to compact metadata. Asserts if no suballocation
with this offset exists.
*/
void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(!suballocations1st.empty())
    {
        // First allocation: Mark it as next empty at the beginning.
        // (m_1stNullItemsBeginCount indexes the oldest still-live item;
        // CleanupAfterFree guarantees a non-empty 1st has such an item.)
        VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
        if(firstSuballoc.offset == offset)
        {
            firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
            firstSuballoc.hAllocation = VK_NULL_HANDLE;
            m_SumFreeSize += firstSuballoc.size;
            ++m_1stNullItemsBeginCount;
            CleanupAfterFree();
            return;
        }
    }

    // Last allocation in 2-part ring buffer or top of upper stack (same logic).
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        VmaSuballocation& lastSuballoc = suballocations2nd.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations2nd.pop_back();
            CleanupAfterFree();
            return;
        }
    }
    // Last allocation in 1st vector.
    else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
    {
        VmaSuballocation& lastSuballoc = suballocations1st.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations1st.pop_back();
            CleanupAfterFree();
            return;
        }
    }

    // Item from the middle of 1st vector.
    {
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        // 1st is sorted by ascending offset, so a binary search applies.
        SuballocationVectorType::iterator it = VmaBinaryFindSorted(
            suballocations1st.begin() + m_1stNullItemsBeginCount,
            suballocations1st.end(),
            refSuballoc,
            VmaSuballocationOffsetLess());
        if(it != suballocations1st.end())
        {
            // Leave the item in place as a free hole; compaction happens in CleanupAfterFree().
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_1stNullItemsMiddleCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
    {
        // Item from the middle of 2nd vector.
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        // 2nd is sorted ascending in ring-buffer mode but descending in
        // double-stack mode (it grows downward), hence the two comparators.
        SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
            VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) :
            VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater());
        if(it != suballocations2nd.end())
        {
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_2ndNullItemsCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
}
    10497 
    10498 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
    10499 {
    10500  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
    10501  const size_t suballocCount = AccessSuballocations1st().size();
    10502  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
    10503 }
    10504 
/*
Normalizes the metadata after any free operation:
- If the whole block became empty, resets both vectors and all counters.
- Otherwise strips null (freed) items from the edges of both vectors,
  optionally compacts the 1st vector (see ShouldCompact1st()), resets the
  2nd-vector mode when 2nd became empty, and — when 1st became fully empty
  in ring-buffer mode — swaps the roles of the two vectors so 2nd becomes
  the new 1st.
Ends with a heavy validation of the resulting state.
*/
void VmaBlockMetadata_Linear::CleanupAfterFree()
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(IsEmpty())
    {
        // Whole block free: reset everything to the initial state.
        suballocations1st.clear();
        suballocations2nd.clear();
        m_1stNullItemsBeginCount = 0;
        m_1stNullItemsMiddleCount = 0;
        m_2ndNullItemsCount = 0;
        m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    }
    else
    {
        const size_t suballoc1stCount = suballocations1st.size();
        const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
        VMA_ASSERT(nullItem1stCount <= suballoc1stCount);

        // Find more null items at the beginning of 1st vector.
        // (Middle nulls that reached the front become "begin" nulls.)
        while(m_1stNullItemsBeginCount < suballoc1stCount &&
            suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
        {
            ++m_1stNullItemsBeginCount;
            --m_1stNullItemsMiddleCount;
        }

        // Find more null items at the end of 1st vector.
        while(m_1stNullItemsMiddleCount > 0 &&
            suballocations1st.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_1stNullItemsMiddleCount;
            suballocations1st.pop_back();
        }

        // Find more null items at the end of 2nd vector.
        while(m_2ndNullItemsCount > 0 &&
            suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_2ndNullItemsCount;
            suballocations2nd.pop_back();
        }

        // Find more null items at the beginning of 2nd vector.
        while(m_2ndNullItemsCount > 0 &&
            suballocations2nd[0].hAllocation == VK_NULL_HANDLE)
        {
            --m_2ndNullItemsCount;
            VmaVectorRemove(suballocations2nd, 0);
        }

        if(ShouldCompact1st())
        {
            // Slide all live items of 1st to the front, dropping null holes.
            const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
            size_t srcIndex = m_1stNullItemsBeginCount;
            for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
            {
                while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
                {
                    ++srcIndex;
                }
                if(dstIndex != srcIndex)
                {
                    suballocations1st[dstIndex] = suballocations1st[srcIndex];
                }
                ++srcIndex;
            }
            suballocations1st.resize(nonNullItemCount);
            m_1stNullItemsBeginCount = 0;
            m_1stNullItemsMiddleCount = 0;
        }

        // 2nd vector became empty.
        if(suballocations2nd.empty())
        {
            m_2ndVectorMode = SECOND_VECTOR_EMPTY;
        }

        // 1st vector became empty.
        if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
        {
            suballocations1st.clear();
            m_1stNullItemsBeginCount = 0;

            if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
            {
                // Swap 1st with 2nd. Now 2nd is empty.
                m_2ndVectorMode = SECOND_VECTOR_EMPTY;
                m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
                // Leading nulls of the (new) 1st vector are reclassified as "begin" nulls.
                while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
                    suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
                {
                    ++m_1stNullItemsBeginCount;
                    --m_1stNullItemsMiddleCount;
                }
                m_2ndNullItemsCount = 0;
                // Flip which internal vector is considered "1st".
                m_1stVectorIndex ^= 1;
            }
        }
    }

    VMA_HEAVY_ASSERT(Validate());
}
    10609 
    10610 
    10612 // class VmaBlockMetadata_Buddy
    10613 
// Initializes an empty buddy-allocator metadata object.
// m_FreeCount starts at 1 because Init() creates a single free root node;
// m_SumFreeSize stays 0 until Init() computes the usable (power-of-2) size.
VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_Root(VMA_NULL),
    m_AllocationCount(0),
    m_FreeCount(1),
    m_SumFreeSize(0)
{
    // Clear all per-level free-list front/back pointers.
    memset(m_FreeList, 0, sizeof(m_FreeList));
}
    10623 
// Recursively destroys the whole node tree, including any nodes that still
// represent allocations (no emptiness assert here).
VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
{
    DeleteNode(m_Root);
}
    10628 
    10629 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
    10630 {
    10631  VmaBlockMetadata::Init(size);
    10632 
    10633  m_UsableSize = VmaPrevPow2(size);
    10634  m_SumFreeSize = m_UsableSize;
    10635 
    10636  // Calculate m_LevelCount.
    10637  m_LevelCount = 1;
    10638  while(m_LevelCount < MAX_LEVELS &&
    10639  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
    10640  {
    10641  ++m_LevelCount;
    10642  }
    10643 
    10644  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
    10645  rootNode->offset = 0;
    10646  rootNode->type = Node::TYPE_FREE;
    10647  rootNode->parent = VMA_NULL;
    10648  rootNode->buddy = VMA_NULL;
    10649 
    10650  m_Root = rootNode;
    10651  AddToFreeListFront(0, rootNode);
    10652 }
    10653 
// Full consistency check of the buddy metadata: validates the node tree,
// cross-checks counters recomputed by ValidateNode() against members, and
// verifies every per-level free list is a well-formed doubly-linked list.
bool VmaBlockMetadata_Buddy::Validate() const
{
    // Validate tree and recompute allocation count / free size for cross-check.
    ValidationContext ctx;
    if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
    {
        VMA_VALIDATE(false && "ValidateNode failed.");
    }
    VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
    VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);

    // Validate free node lists: each list may only contain TYPE_FREE nodes,
    // prev/next links must be mutually consistent, and `back` must point to
    // the last element.
    for(uint32_t level = 0; level < m_LevelCount; ++level)
    {
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
            m_FreeList[level].front->free.prev == VMA_NULL);

        for(Node* node = m_FreeList[level].front;
            node != VMA_NULL;
            node = node->free.next)
        {
            VMA_VALIDATE(node->type == Node::TYPE_FREE);

            if(node->free.next == VMA_NULL)
            {
                VMA_VALIDATE(m_FreeList[level].back == node);
            }
            else
            {
                VMA_VALIDATE(node->free.next->free.prev == node);
            }
        }
    }

    // Validate that free lists at higher levels (beyond m_LevelCount) are empty.
    for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
    {
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
    }

    return true;
}
    10696 
    10697 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
    10698 {
    10699  for(uint32_t level = 0; level < m_LevelCount; ++level)
    10700  {
    10701  if(m_FreeList[level].front != VMA_NULL)
    10702  {
    10703  return LevelToNodeSize(level);
    10704  }
    10705  }
    10706  return 0;
    10707 }
    10708 
// Computes detailed statistics for this block by walking the whole node tree.
void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
{
    // Space past the power-of-2 usable prefix, never available for allocation.
    const VkDeviceSize unusableSize = GetUnusableSize();

    outInfo.blockCount = 1;

    outInfo.allocationCount = outInfo.unusedRangeCount = 0;
    outInfo.usedBytes = outInfo.unusedBytes = 0;

    // Seed extremes so per-node min/max folding works.
    outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
    outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
    outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.

    CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));

    // Account for the unusable tail as one additional unused range.
    if(unusableSize > 0)
    {
        ++outInfo.unusedRangeCount;
        outInfo.unusedBytes += unusableSize;
        outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
        outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
    }
}
    10732 
    10733 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
    10734 {
    10735  const VkDeviceSize unusableSize = GetUnusableSize();
    10736 
    10737  inoutStats.size += GetSize();
    10738  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
    10739  inoutStats.allocationCount += m_AllocationCount;
    10740  inoutStats.unusedRangeCount += m_FreeCount;
    10741  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    10742 
    10743  if(unusableSize > 0)
    10744  {
    10745  ++inoutStats.unusedRangeCount;
    10746  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
    10747  }
    10748 }
    10749 
    10750 #if VMA_STATS_STRING_ENABLED
    10751 
// Emits a JSON description of this block: summary numbers, every node
// (allocations and unused ranges) in offset order, and the unusable tail.
void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
{
    // TODO optimize
    // Full statistics are recomputed here only to obtain the summary numbers.
    VmaStatInfo stat;
    CalcAllocationStatInfo(stat);

    PrintDetailedMap_Begin(
        json,
        stat.unusedBytes,
        stat.allocationCount,
        stat.unusedRangeCount);

    PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));

    // Report space beyond the power-of-2 usable prefix as one unused range.
    const VkDeviceSize unusableSize = GetUnusableSize();
    if(unusableSize > 0)
    {
        PrintDetailedMap_UnusedRange(json,
            m_UsableSize, // offset
            unusableSize); // size
    }

    PrintDetailedMap_End(json);
}
    10776 
    10777 #endif // #if VMA_STATS_STRING_ENABLED
    10778 
// Tries to find a free node that can hold allocSize with allocAlignment.
// Searches from the smallest fitting level (targetLevel) up towards level 0,
// so the smallest suitable node wins. On success fills *pAllocationRequest and
// stores the chosen level in customData for Alloc(). Lost allocations are
// never requested by this algorithm (itemsToMakeLostCount is always 0).
bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    bool upperAddress,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");

    // Simple way to respect bufferImageGranularity. May be optimized some day.
    // Whenever it might be an OPTIMAL image, round both alignment and size up
    // to the granularity so neighboring nodes cannot share a granularity page.
    if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
        allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
        allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
    {
        allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
        allocSize = VMA_MAX(allocSize, bufferImageGranularity);
    }

    if(allocSize > m_UsableSize)
    {
        return false;
    }

    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    // Iterates level = targetLevel, targetLevel-1, ..., 0 (post-decrement trick).
    for(uint32_t level = targetLevel + 1; level--; )
    {
        for(Node* freeNode = m_FreeList[level].front;
            freeNode != VMA_NULL;
            freeNode = freeNode->free.next)
        {
            if(freeNode->offset % allocAlignment == 0)
            {
                pAllocationRequest->type = VmaAllocationRequestType::Normal;
                pAllocationRequest->offset = freeNode->offset;
                pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
                pAllocationRequest->sumItemSize = 0;
                pAllocationRequest->itemsToMakeLostCount = 0;
                // Level is needed later by Alloc() to find and split this node.
                pAllocationRequest->customData = (void*)(uintptr_t)level;
                return true;
            }
        }
    }

    return false;
}
    10830 
    10831 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
    10832  uint32_t currentFrameIndex,
    10833  uint32_t frameInUseCount,
    10834  VmaAllocationRequest* pAllocationRequest)
    10835 {
    10836  /*
    10837  Lost allocations are not supported in buddy allocator at the moment.
    10838  Support might be added in the future.
    10839  */
    10840  return pAllocationRequest->itemsToMakeLostCount == 0;
    10841 }
    10842 
    10843 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    10844 {
    10845  /*
    10846  Lost allocations are not supported in buddy allocator at the moment.
    10847  Support might be added in the future.
    10848  */
    10849  return 0;
    10850 }
    10851 
// Commits an allocation previously found by CreateAllocationRequest().
// Locates the free node at request.offset on the level stored in
// request.customData, splits it repeatedly until reaching the level matching
// allocSize, then converts the resulting node into an allocation node.
void VmaBlockMetadata_Buddy::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    VmaAllocation hAllocation)
{
    VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);

    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;

    // Find the free node chosen by CreateAllocationRequest by its offset.
    Node* currNode = m_FreeList[currLevel].front;
    VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    while(currNode->offset != request.offset)
    {
        currNode = currNode->free.next;
        VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    }

    // Go down, splitting free nodes.
    while(currLevel < targetLevel)
    {
        // currNode is already first free node at currLevel.
        // Remove it from list of free nodes at this currLevel.
        RemoveFromFreeList(currLevel, currNode);

        const uint32_t childrenLevel = currLevel + 1;

        // Create two free sub-nodes.
        Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
        Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();

        leftChild->offset = currNode->offset;
        leftChild->type = Node::TYPE_FREE;
        leftChild->parent = currNode;
        leftChild->buddy = rightChild;

        rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
        rightChild->type = Node::TYPE_FREE;
        rightChild->parent = currNode;
        rightChild->buddy = leftChild;

        // Convert current currNode to split type.
        currNode->type = Node::TYPE_SPLIT;
        currNode->split.leftChild = leftChild;

        // Add child nodes to free list. Order is important!
        // Right is pushed first so that left ends up at the front of the list
        // and is picked up as currNode a few lines below.
        AddToFreeListFront(childrenLevel, rightChild);
        AddToFreeListFront(childrenLevel, leftChild);

        // Splitting one free node into two adds one to the free-node count.
        ++m_FreeCount;
        //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
        ++currLevel;
        currNode = m_FreeList[currLevel].front;

        /*
        We can be sure that currNode, as left child of node previously split,
        also fullfills the alignment requirement.
        */
    }

    // Remove from free list.
    VMA_ASSERT(currLevel == targetLevel &&
        currNode != VMA_NULL &&
        currNode->type == Node::TYPE_FREE);
    RemoveFromFreeList(currLevel, currNode);

    // Convert to allocation node.
    currNode->type = Node::TYPE_ALLOCATION;
    currNode->allocation.alloc = hAllocation;

    ++m_AllocationCount;
    --m_FreeCount;
    m_SumFreeSize -= allocSize;
}
    10927 
    10928 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
    10929 {
    10930  if(node->type == Node::TYPE_SPLIT)
    10931  {
    10932  DeleteNode(node->split.leftChild->buddy);
    10933  DeleteNode(node->split.leftChild);
    10934  }
    10935 
    10936  vma_delete(GetAllocationCallbacks(), node);
    10937 }
    10938 
    10939 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
    10940 {
    10941  VMA_VALIDATE(level < m_LevelCount);
    10942  VMA_VALIDATE(curr->parent == parent);
    10943  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
    10944  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
    10945  switch(curr->type)
    10946  {
    10947  case Node::TYPE_FREE:
    10948  // curr->free.prev, next are validated separately.
    10949  ctx.calculatedSumFreeSize += levelNodeSize;
    10950  ++ctx.calculatedFreeCount;
    10951  break;
    10952  case Node::TYPE_ALLOCATION:
    10953  ++ctx.calculatedAllocationCount;
    10954  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
    10955  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
    10956  break;
    10957  case Node::TYPE_SPLIT:
    10958  {
    10959  const uint32_t childrenLevel = level + 1;
    10960  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
    10961  const Node* const leftChild = curr->split.leftChild;
    10962  VMA_VALIDATE(leftChild != VMA_NULL);
    10963  VMA_VALIDATE(leftChild->offset == curr->offset);
    10964  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
    10965  {
    10966  VMA_VALIDATE(false && "ValidateNode for left child failed.");
    10967  }
    10968  const Node* const rightChild = leftChild->buddy;
    10969  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
    10970  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
    10971  {
    10972  VMA_VALIDATE(false && "ValidateNode for right child failed.");
    10973  }
    10974  }
    10975  break;
    10976  default:
    10977  return false;
    10978  }
    10979 
    10980  return true;
    10981 }
    10982 
    10983 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
    10984 {
    10985  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
    10986  uint32_t level = 0;
    10987  VkDeviceSize currLevelNodeSize = m_UsableSize;
    10988  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
    10989  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
    10990  {
    10991  ++level;
    10992  currLevelNodeSize = nextLevelNodeSize;
    10993  nextLevelNodeSize = currLevelNodeSize >> 1;
    10994  }
    10995  return level;
    10996 }
    10997 
// Frees the allocation at the given offset: descends the tree from the root
// to the allocation node, marks it free, then merges it with its buddy
// repeatedly while both halves of a parent are free.
void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
{
    // Find node and level by walking down split nodes towards the offset.
    Node* node = m_Root;
    VkDeviceSize nodeOffset = 0;
    uint32_t level = 0;
    VkDeviceSize levelNodeSize = LevelToNodeSize(0);
    while(node->type == Node::TYPE_SPLIT)
    {
        const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
        if(offset < nodeOffset + nextLevelSize)
        {
            // Offset lies in the left half.
            node = node->split.leftChild;
        }
        else
        {
            // Offset lies in the right half (the left child's buddy).
            node = node->split.leftChild->buddy;
            nodeOffset += nextLevelSize;
        }
        ++level;
        levelNodeSize = nextLevelSize;
    }

    VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
    VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);

    ++m_FreeCount;
    --m_AllocationCount;
    // NOTE(review): the assert above tolerates alloc == VK_NULL_HANDLE, yet this
    // line dereferences alloc - presumably callers always pass a valid handle
    // on this path; confirm against call sites.
    m_SumFreeSize += alloc->GetSize();

    node->type = Node::TYPE_FREE;

    // Join free nodes if possible - walk upwards while the buddy is also free.
    while(level > 0 && node->buddy->type == Node::TYPE_FREE)
    {
        RemoveFromFreeList(level, node->buddy);
        Node* const parent = node->parent;

        vma_delete(GetAllocationCallbacks(), node->buddy);
        vma_delete(GetAllocationCallbacks(), node);
        parent->type = Node::TYPE_FREE;

        node = parent;
        --level;
        //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
        // Two free children merged into one free parent.
        --m_FreeCount;
    }

    AddToFreeListFront(level, node);
}
    11048 
    11049 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
    11050 {
    11051  switch(node->type)
    11052  {
    11053  case Node::TYPE_FREE:
    11054  ++outInfo.unusedRangeCount;
    11055  outInfo.unusedBytes += levelNodeSize;
    11056  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
    11057  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, levelNodeSize);
    11058  break;
    11059  case Node::TYPE_ALLOCATION:
    11060  {
    11061  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
    11062  ++outInfo.allocationCount;
    11063  outInfo.usedBytes += allocSize;
    11064  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
    11065  outInfo.allocationSizeMin = VMA_MAX(outInfo.allocationSizeMin, allocSize);
    11066 
    11067  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
    11068  if(unusedRangeSize > 0)
    11069  {
    11070  ++outInfo.unusedRangeCount;
    11071  outInfo.unusedBytes += unusedRangeSize;
    11072  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    11073  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, unusedRangeSize);
    11074  }
    11075  }
    11076  break;
    11077  case Node::TYPE_SPLIT:
    11078  {
    11079  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
    11080  const Node* const leftChild = node->split.leftChild;
    11081  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
    11082  const Node* const rightChild = leftChild->buddy;
    11083  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
    11084  }
    11085  break;
    11086  default:
    11087  VMA_ASSERT(0);
    11088  }
    11089 }
    11090 
    11091 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
    11092 {
    11093  VMA_ASSERT(node->type == Node::TYPE_FREE);
    11094 
    11095  // List is empty.
    11096  Node* const frontNode = m_FreeList[level].front;
    11097  if(frontNode == VMA_NULL)
    11098  {
    11099  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
    11100  node->free.prev = node->free.next = VMA_NULL;
    11101  m_FreeList[level].front = m_FreeList[level].back = node;
    11102  }
    11103  else
    11104  {
    11105  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
    11106  node->free.prev = VMA_NULL;
    11107  node->free.next = frontNode;
    11108  frontNode->free.prev = node;
    11109  m_FreeList[level].front = node;
    11110  }
    11111 }
    11112 
    11113 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
    11114 {
    11115  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
    11116 
    11117  // It is at the front.
    11118  if(node->free.prev == VMA_NULL)
    11119  {
    11120  VMA_ASSERT(m_FreeList[level].front == node);
    11121  m_FreeList[level].front = node->free.next;
    11122  }
    11123  else
    11124  {
    11125  Node* const prevFreeNode = node->free.prev;
    11126  VMA_ASSERT(prevFreeNode->free.next == node);
    11127  prevFreeNode->free.next = node->free.next;
    11128  }
    11129 
    11130  // It is at the back.
    11131  if(node->free.next == VMA_NULL)
    11132  {
    11133  VMA_ASSERT(m_FreeList[level].back == node);
    11134  m_FreeList[level].back = node->free.prev;
    11135  }
    11136  else
    11137  {
    11138  Node* const nextFreeNode = node->free.next;
    11139  VMA_ASSERT(nextFreeNode->free.prev == node);
    11140  nextFreeNode->free.prev = node->free.prev;
    11141  }
    11142 }
    11143 
    11144 #if VMA_STATS_STRING_ENABLED
// Recursively writes one node into the JSON map: free nodes and the unused
// tail of partially-filled allocation nodes become "unused range" entries.
void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
{
    switch(node->type)
    {
    case Node::TYPE_FREE:
        PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
        break;
    case Node::TYPE_ALLOCATION:
        {
            PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
            const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
            // The node may be larger than the allocation (power-of-2 rounding);
            // report the slack as an unused range.
            if(allocSize < levelNodeSize)
            {
                PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
            }
        }
        break;
    case Node::TYPE_SPLIT:
        {
            const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
            const Node* const leftChild = node->split.leftChild;
            PrintDetailedMapNode(json, leftChild, childrenNodeSize);
            const Node* const rightChild = leftChild->buddy;
            PrintDetailedMapNode(json, rightChild, childrenNodeSize);
        }
        break;
    default:
        VMA_ASSERT(0);
    }
}
    11175 #endif // #if VMA_STATS_STRING_ENABLED
    11176 
    11177 
    11179 // class VmaDeviceMemoryBlock
    11180 
// Constructs an uninitialized block; real setup (memory handle, metadata
// implementation) happens in Init().
VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    m_pMetadata(VMA_NULL),
    m_MemoryTypeIndex(UINT32_MAX),
    m_Id(0),
    m_hMemory(VK_NULL_HANDLE),
    m_MapCount(0),
    m_pMappedData(VMA_NULL)
{
}
    11190 
    11191 void VmaDeviceMemoryBlock::Init(
    11192  VmaAllocator hAllocator,
    11193  VmaPool hParentPool,
    11194  uint32_t newMemoryTypeIndex,
    11195  VkDeviceMemory newMemory,
    11196  VkDeviceSize newSize,
    11197  uint32_t id,
    11198  uint32_t algorithm)
    11199 {
    11200  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    11201 
    11202  m_hParentPool = hParentPool;
    11203  m_MemoryTypeIndex = newMemoryTypeIndex;
    11204  m_Id = id;
    11205  m_hMemory = newMemory;
    11206 
    11207  switch(algorithm)
    11208  {
    11210  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
    11211  break;
    11213  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
    11214  break;
    11215  default:
    11216  VMA_ASSERT(0);
    11217  // Fall-through.
    11218  case 0:
    11219  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
    11220  }
    11221  m_pMetadata->Init(newSize);
    11222 }
    11223 
// Releases the underlying VkDeviceMemory and the metadata object.
// Must only be called when no allocations remain in this block.
void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
{
    // This is the most important assert in the entire library.
    // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
    VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");

    VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
    m_hMemory = VK_NULL_HANDLE;

    vma_delete(allocator, m_pMetadata);
    m_pMetadata = VMA_NULL;
}
    11237 
    11238 bool VmaDeviceMemoryBlock::Validate() const
    11239 {
    11240  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
    11241  (m_pMetadata->GetSize() != 0));
    11242 
    11243  return m_pMetadata->Validate();
    11244 }
    11245 
    11246 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
    11247 {
    11248  void* pData = nullptr;
    11249  VkResult res = Map(hAllocator, 1, &pData);
    11250  if(res != VK_SUCCESS)
    11251  {
    11252  return res;
    11253  }
    11254 
    11255  res = m_pMetadata->CheckCorruption(pData);
    11256 
    11257  Unmap(hAllocator, 1);
    11258 
    11259  return res;
    11260 }
    11261 
// Maps the whole block's memory with reference counting: the first mapping
// calls vkMapMemory; subsequent calls only bump m_MapCount and return the
// cached pointer. `count` is the number of references to add; ppData
// (optional) receives the pointer to the start of the block.
VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
{
    if(count == 0)
    {
        return VK_SUCCESS;
    }

    // Guards m_MapCount/m_pMappedData and serializes vkMapMemory on this memory.
    VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    if(m_MapCount != 0)
    {
        // Already mapped - just add references and hand out the cached pointer.
        m_MapCount += count;
        VMA_ASSERT(m_pMappedData != VMA_NULL);
        if(ppData != VMA_NULL)
        {
            *ppData = m_pMappedData;
        }
        return VK_SUCCESS;
    }
    else
    {
        // First mapping: map the entire memory object.
        VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
            hAllocator->m_hDevice,
            m_hMemory,
            0, // offset
            VK_WHOLE_SIZE,
            0, // flags
            &m_pMappedData);
        if(result == VK_SUCCESS)
        {
            if(ppData != VMA_NULL)
            {
                *ppData = m_pMappedData;
            }
            m_MapCount = count;
        }
        return result;
    }
}
    11300 
// Releases `count` mapping references; calls vkUnmapMemory only when the
// reference count drops to zero. Unbalanced unmaps assert and are ignored.
void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
{
    if(count == 0)
    {
        return;
    }

    // Same lock as Map(): protects m_MapCount/m_pMappedData and vkUnmapMemory.
    VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    if(m_MapCount >= count)
    {
        m_MapCount -= count;
        if(m_MapCount == 0)
        {
            m_pMappedData = VMA_NULL;
            (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
        }
    }
    else
    {
        VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
    }
}
    11323 
    11324 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
    11325 {
    11326  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    11327  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
    11328 
    11329  void* pData;
    11330  VkResult res = Map(hAllocator, 1, &pData);
    11331  if(res != VK_SUCCESS)
    11332  {
    11333  return res;
    11334  }
    11335 
    11336  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
    11337  VmaWriteMagicValue(pData, allocOffset + allocSize);
    11338 
    11339  Unmap(hAllocator, 1);
    11340 
    11341  return VK_SUCCESS;
    11342 }
    11343 
    11344 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
    11345 {
    11346  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    11347  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
    11348 
    11349  void* pData;
    11350  VkResult res = Map(hAllocator, 1, &pData);
    11351  if(res != VK_SUCCESS)
    11352  {
    11353  return res;
    11354  }
    11355 
    11356  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
    11357  {
    11358  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
    11359  }
    11360  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
    11361  {
    11362  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
    11363  }
    11364 
    11365  Unmap(hAllocator, 1);
    11366 
    11367  return VK_SUCCESS;
    11368 }
    11369 
    11370 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
    11371  const VmaAllocator hAllocator,
    11372  const VmaAllocation hAllocation,
    11373  VkBuffer hBuffer)
    11374 {
    11375  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
    11376  hAllocation->GetBlock() == this);
    11377  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    11378  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    11379  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
    11380  hAllocator->m_hDevice,
    11381  hBuffer,
    11382  m_hMemory,
    11383  hAllocation->GetOffset());
    11384 }
    11385 
    11386 VkResult VmaDeviceMemoryBlock::BindImageMemory(
    11387  const VmaAllocator hAllocator,
    11388  const VmaAllocation hAllocation,
    11389  VkImage hImage)
    11390 {
    11391  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
    11392  hAllocation->GetBlock() == this);
    11393  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    11394  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    11395  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
    11396  hAllocator->m_hDevice,
    11397  hImage,
    11398  m_hMemory,
    11399  hAllocation->GetOffset());
    11400 }
    11401 
// Resets a VmaStatInfo to its accumulation identity: all counters zero,
// minimums seeded with UINT64_MAX so VMA_MIN folding works.
static void InitStatInfo(VmaStatInfo& outInfo)
{
    memset(&outInfo, 0, sizeof(outInfo));
    outInfo.allocationSizeMin = UINT64_MAX;
    outInfo.unusedRangeSizeMin = UINT64_MAX;
}
    11408 
// Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
// Counts and byte totals add; min/max extremes fold with VMA_MIN/VMA_MAX.
// The Avg fields are not combined here - see VmaPostprocessCalcStatInfo().
static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
{
    inoutInfo.blockCount += srcInfo.blockCount;
    inoutInfo.allocationCount += srcInfo.allocationCount;
    inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
    inoutInfo.usedBytes += srcInfo.usedBytes;
    inoutInfo.unusedBytes += srcInfo.unusedBytes;
    inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
    inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
    inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
    inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
}
    11422 
    11423 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
    11424 {
    11425  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
    11426  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
    11427  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
    11428  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
    11429 }
    11430 
// Constructs a custom pool: configures its block vector from createInfo,
// falling back to preferredBlockSize when the user did not fix a block size
// (createInfo.blockSize == 0 means "let the allocator choose").
VmaPool_T::VmaPool_T(
    VmaAllocator hAllocator,
    const VmaPoolCreateInfo& createInfo,
    VkDeviceSize preferredBlockSize) :
    m_BlockVector(
        hAllocator,
        this, // hParentPool
        createInfo.memoryTypeIndex,
        createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
        createInfo.minBlockCount,
        createInfo.maxBlockCount,
        (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
        createInfo.frameInUseCount,
        true, // isCustomPool
        createInfo.blockSize != 0, // explicitBlockSize
        createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
    m_Id(0)
{
}
    11450 
// Trivial destructor: m_BlockVector's destructor releases all owned memory blocks.
VmaPool_T::~VmaPool_T()
{
}
    11454 
    11455 #if VMA_STATS_STRING_ENABLED
    11456 
    11457 #endif // #if VMA_STATS_STRING_ENABLED
    11458 
// Constructs a vector of VkDeviceMemory blocks for one Vulkan memory type.
// Serves sub-allocations from those blocks; used for both default and custom pools.
//
// bufferImageGranularity: separation granularity passed to suballocation
//     requests; 1 effectively disables the handling (see VmaPool_T ctor).
// frameInUseCount: forwarded to CreateAllocationRequest calls — presumably the
//     number of recent frames protecting allocations from becoming lost; TODO confirm.
// isCustomPool: true when owned by a user-created VmaPool.
// explicitBlockSize: true when the pool specified a fixed block size
//     (disables the progressive block-size heuristic in AllocatePage).
// algorithm: bits from VMA_POOL_CREATE_ALGORITHM_MASK; 0 = default algorithm.
VmaBlockVector::VmaBlockVector(
    VmaAllocator hAllocator,
    VmaPool hParentPool,
    uint32_t memoryTypeIndex,
    VkDeviceSize preferredBlockSize,
    size_t minBlockCount,
    size_t maxBlockCount,
    VkDeviceSize bufferImageGranularity,
    uint32_t frameInUseCount,
    bool isCustomPool,
    bool explicitBlockSize,
    uint32_t algorithm) :
    m_hAllocator(hAllocator),
    m_hParentPool(hParentPool),
    m_MemoryTypeIndex(memoryTypeIndex),
    m_PreferredBlockSize(preferredBlockSize),
    m_MinBlockCount(minBlockCount),
    m_MaxBlockCount(maxBlockCount),
    m_BufferImageGranularity(bufferImageGranularity),
    m_FrameInUseCount(frameInUseCount),
    m_IsCustomPool(isCustomPool),
    m_ExplicitBlockSize(explicitBlockSize),
    m_Algorithm(algorithm),
    m_HasEmptyBlock(false),
    // Block list uses the allocator's custom allocation callbacks.
    m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    m_NextBlockId(0)
{
}
    11487 
    11488 VmaBlockVector::~VmaBlockVector()
    11489 {
    11490  for(size_t i = m_Blocks.size(); i--; )
    11491  {
    11492  m_Blocks[i]->Destroy(m_hAllocator);
    11493  vma_delete(m_hAllocator, m_Blocks[i]);
    11494  }
    11495 }
    11496 
    11497 VkResult VmaBlockVector::CreateMinBlocks()
    11498 {
    11499  for(size_t i = 0; i < m_MinBlockCount; ++i)
    11500  {
    11501  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
    11502  if(res != VK_SUCCESS)
    11503  {
    11504  return res;
    11505  }
    11506  }
    11507  return VK_SUCCESS;
    11508 }
    11509 
    11510 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
    11511 {
    11512  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
    11513 
    11514  const size_t blockCount = m_Blocks.size();
    11515 
    11516  pStats->size = 0;
    11517  pStats->unusedSize = 0;
    11518  pStats->allocationCount = 0;
    11519  pStats->unusedRangeCount = 0;
    11520  pStats->unusedRangeSizeMax = 0;
    11521  pStats->blockCount = blockCount;
    11522 
    11523  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    11524  {
    11525  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    11526  VMA_ASSERT(pBlock);
    11527  VMA_HEAVY_ASSERT(pBlock->Validate());
    11528  pBlock->m_pMetadata->AddPoolStats(*pStats);
    11529  }
    11530 }
    11531 
    11532 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
    11533 {
    11534  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    11535  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
    11536  (VMA_DEBUG_MARGIN > 0) &&
    11537  (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) &&
    11538  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
    11539 }
    11540 
    11541 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
    11542 
    11543 VkResult VmaBlockVector::Allocate(
    11544  uint32_t currentFrameIndex,
    11545  VkDeviceSize size,
    11546  VkDeviceSize alignment,
    11547  const VmaAllocationCreateInfo& createInfo,
    11548  VmaSuballocationType suballocType,
    11549  size_t allocationCount,
    11550  VmaAllocation* pAllocations)
    11551 {
    11552  size_t allocIndex;
    11553  VkResult res = VK_SUCCESS;
    11554 
    11555  if(IsCorruptionDetectionEnabled())
    11556  {
    11557  size = VmaAlignUp<VkDeviceSize>(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
    11558  alignment = VmaAlignUp<VkDeviceSize>(alignment, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
    11559  }
    11560 
    11561  {
    11562  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
    11563  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    11564  {
    11565  res = AllocatePage(
    11566  currentFrameIndex,
    11567  size,
    11568  alignment,
    11569  createInfo,
    11570  suballocType,
    11571  pAllocations + allocIndex);
    11572  if(res != VK_SUCCESS)
    11573  {
    11574  break;
    11575  }
    11576  }
    11577  }
    11578 
    11579  if(res != VK_SUCCESS)
    11580  {
    11581  // Free all already created allocations.
    11582  while(allocIndex--)
    11583  {
    11584  Free(pAllocations[allocIndex]);
    11585  }
    11586  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
    11587  }
    11588 
    11589  return res;
    11590 }
    11591 
    11592 VkResult VmaBlockVector::AllocatePage(
    11593  uint32_t currentFrameIndex,
    11594  VkDeviceSize size,
    11595  VkDeviceSize alignment,
    11596  const VmaAllocationCreateInfo& createInfo,
    11597  VmaSuballocationType suballocType,
    11598  VmaAllocation* pAllocation)
    11599 {
    11600  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    11601  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
    11602  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    11603  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
    11604  const bool canCreateNewBlock =
    11605  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
    11606  (m_Blocks.size() < m_MaxBlockCount);
    11607  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
    11608 
    11609  // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer.
    11610  // Which in turn is available only when maxBlockCount = 1.
    11611  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
    11612  {
    11613  canMakeOtherLost = false;
    11614  }
    11615 
    11616  // Upper address can only be used with linear allocator and within single memory block.
    11617  if(isUpperAddress &&
    11618  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
    11619  {
    11620  return VK_ERROR_FEATURE_NOT_PRESENT;
    11621  }
    11622 
    11623  // Validate strategy.
    11624  switch(strategy)
    11625  {
    11626  case 0:
    11628  break;
    11632  break;
    11633  default:
    11634  return VK_ERROR_FEATURE_NOT_PRESENT;
    11635  }
    11636 
    11637  // Early reject: requested allocation size is larger that maximum block size for this block vector.
    11638  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
    11639  {
    11640  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    11641  }
    11642 
    11643  /*
    11644  Under certain condition, this whole section can be skipped for optimization, so
    11645  we move on directly to trying to allocate with canMakeOtherLost. That's the case
    11646  e.g. for custom pools with linear algorithm.
    11647  */
    11648  if(!canMakeOtherLost || canCreateNewBlock)
    11649  {
    11650  // 1. Search existing allocations. Try to allocate without making other allocations lost.
    11651  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
    11653 
    11654  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    11655  {
    11656  // Use only last block.
    11657  if(!m_Blocks.empty())
    11658  {
    11659  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
    11660  VMA_ASSERT(pCurrBlock);
    11661  VkResult res = AllocateFromBlock(
    11662  pCurrBlock,
    11663  currentFrameIndex,
    11664  size,
    11665  alignment,
    11666  allocFlagsCopy,
    11667  createInfo.pUserData,
    11668  suballocType,
    11669  strategy,
    11670  pAllocation);
    11671  if(res == VK_SUCCESS)
    11672  {
    11673  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
    11674  return VK_SUCCESS;
    11675  }
    11676  }
    11677  }
    11678  else
    11679  {
    11681  {
    11682  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    11683  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    11684  {
    11685  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    11686  VMA_ASSERT(pCurrBlock);
    11687  VkResult res = AllocateFromBlock(
    11688  pCurrBlock,
    11689  currentFrameIndex,
    11690  size,
    11691  alignment,
    11692  allocFlagsCopy,
    11693  createInfo.pUserData,
    11694  suballocType,
    11695  strategy,
    11696  pAllocation);
    11697  if(res == VK_SUCCESS)
    11698  {
    11699  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
    11700  return VK_SUCCESS;
    11701  }
    11702  }
    11703  }
    11704  else // WORST_FIT, FIRST_FIT
    11705  {
    11706  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
    11707  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    11708  {
    11709  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    11710  VMA_ASSERT(pCurrBlock);
    11711  VkResult res = AllocateFromBlock(
    11712  pCurrBlock,
    11713  currentFrameIndex,
    11714  size,
    11715  alignment,
    11716  allocFlagsCopy,
    11717  createInfo.pUserData,
    11718  suballocType,
    11719  strategy,
    11720  pAllocation);
    11721  if(res == VK_SUCCESS)
    11722  {
    11723  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
    11724  return VK_SUCCESS;
    11725  }
    11726  }
    11727  }
    11728  }
    11729 
    11730  // 2. Try to create new block.
    11731  if(canCreateNewBlock)
    11732  {
    11733  // Calculate optimal size for new block.
    11734  VkDeviceSize newBlockSize = m_PreferredBlockSize;
    11735  uint32_t newBlockSizeShift = 0;
    11736  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
    11737 
    11738  if(!m_ExplicitBlockSize)
    11739  {
    11740  // Allocate 1/8, 1/4, 1/2 as first blocks.
    11741  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
    11742  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
    11743  {
    11744  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    11745  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
    11746  {
    11747  newBlockSize = smallerNewBlockSize;
    11748  ++newBlockSizeShift;
    11749  }
    11750  else
    11751  {
    11752  break;
    11753  }
    11754  }
    11755  }
    11756 
    11757  size_t newBlockIndex = 0;
    11758  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
    11759  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
    11760  if(!m_ExplicitBlockSize)
    11761  {
    11762  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
    11763  {
    11764  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    11765  if(smallerNewBlockSize >= size)
    11766  {
    11767  newBlockSize = smallerNewBlockSize;
    11768  ++newBlockSizeShift;
    11769  res = CreateBlock(newBlockSize, &newBlockIndex);
    11770  }
    11771  else
    11772  {
    11773  break;
    11774  }
    11775  }
    11776  }
    11777 
    11778  if(res == VK_SUCCESS)
    11779  {
    11780  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
    11781  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
    11782 
    11783  res = AllocateFromBlock(
    11784  pBlock,
    11785  currentFrameIndex,
    11786  size,
    11787  alignment,
    11788  allocFlagsCopy,
    11789  createInfo.pUserData,
    11790  suballocType,
    11791  strategy,
    11792  pAllocation);
    11793  if(res == VK_SUCCESS)
    11794  {
    11795  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
    11796  return VK_SUCCESS;
    11797  }
    11798  else
    11799  {
    11800  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
    11801  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    11802  }
    11803  }
    11804  }
    11805  }
    11806 
    11807  // 3. Try to allocate from existing blocks with making other allocations lost.
    11808  if(canMakeOtherLost)
    11809  {
    11810  uint32_t tryIndex = 0;
    11811  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
    11812  {
    11813  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
    11814  VmaAllocationRequest bestRequest = {};
    11815  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
    11816 
    11817  // 1. Search existing allocations.
    11819  {
    11820  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    11821  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    11822  {
    11823  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    11824  VMA_ASSERT(pCurrBlock);
    11825  VmaAllocationRequest currRequest = {};
    11826  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
    11827  currentFrameIndex,
    11828  m_FrameInUseCount,
    11829  m_BufferImageGranularity,
    11830  size,
    11831  alignment,
    11832  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
    11833  suballocType,
    11834  canMakeOtherLost,
    11835  strategy,
    11836  &currRequest))
    11837  {
    11838  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    11839  if(pBestRequestBlock == VMA_NULL ||
    11840  currRequestCost < bestRequestCost)
    11841  {
    11842  pBestRequestBlock = pCurrBlock;
    11843  bestRequest = currRequest;
    11844  bestRequestCost = currRequestCost;
    11845 
    11846  if(bestRequestCost == 0)
    11847  {
    11848  break;
    11849  }
    11850  }
    11851  }
    11852  }
    11853  }
    11854  else // WORST_FIT, FIRST_FIT
    11855  {
    11856  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
    11857  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    11858  {
    11859  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    11860  VMA_ASSERT(pCurrBlock);
    11861  VmaAllocationRequest currRequest = {};
    11862  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
    11863  currentFrameIndex,
    11864  m_FrameInUseCount,
    11865  m_BufferImageGranularity,
    11866  size,
    11867  alignment,
    11868  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
    11869  suballocType,
    11870  canMakeOtherLost,
    11871  strategy,
    11872  &currRequest))
    11873  {
    11874  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    11875  if(pBestRequestBlock == VMA_NULL ||
    11876  currRequestCost < bestRequestCost ||
    11878  {
    11879  pBestRequestBlock = pCurrBlock;
    11880  bestRequest = currRequest;
    11881  bestRequestCost = currRequestCost;
    11882 
    11883  if(bestRequestCost == 0 ||
    11885  {
    11886  break;
    11887  }
    11888  }
    11889  }
    11890  }
    11891  }
    11892 
    11893  if(pBestRequestBlock != VMA_NULL)
    11894  {
    11895  if(mapped)
    11896  {
    11897  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
    11898  if(res != VK_SUCCESS)
    11899  {
    11900  return res;
    11901  }
    11902  }
    11903 
    11904  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
    11905  currentFrameIndex,
    11906  m_FrameInUseCount,
    11907  &bestRequest))
    11908  {
    11909  // We no longer have an empty Allocation.
    11910  if(pBestRequestBlock->m_pMetadata->IsEmpty())
    11911  {
    11912  m_HasEmptyBlock = false;
    11913  }
    11914  // Allocate from this pBlock.
    11915  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate();
    11916  (*pAllocation)->Ctor(currentFrameIndex, isUserDataString);
    11917  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, *pAllocation);
    11918  (*pAllocation)->InitBlockAllocation(
    11919  pBestRequestBlock,
    11920  bestRequest.offset,
    11921  alignment,
    11922  size,
    11923  suballocType,
    11924  mapped,
    11925  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    11926  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
    11927  VMA_DEBUG_LOG(" Returned from existing block");
    11928  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
    11929  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    11930  {
    11931  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    11932  }
    11933  if(IsCorruptionDetectionEnabled())
    11934  {
    11935  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
    11936  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
    11937  }
    11938  return VK_SUCCESS;
    11939  }
    11940  // else: Some allocations must have been touched while we are here. Next try.
    11941  }
    11942  else
    11943  {
    11944  // Could not find place in any of the blocks - break outer loop.
    11945  break;
    11946  }
    11947  }
    11948  /* Maximum number of tries exceeded - a very unlike event when many other
    11949  threads are simultaneously touching allocations making it impossible to make
    11950  lost at the same time as we try to allocate. */
    11951  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
    11952  {
    11953  return VK_ERROR_TOO_MANY_OBJECTS;
    11954  }
    11955  }
    11956 
    11957  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    11958 }
    11959 
// Returns hAllocation to its owning block. May also destroy one memory block:
// the vector caches at most one empty block (above m_MinBlockCount); a second
// empty block is deleted. The VkDeviceMemory release is deferred to after the
// mutex is dropped.
void VmaBlockVector::Free(
    VmaAllocation hAllocation)
{
    // Block chosen for destruction; freed outside the lock below.
    VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;

    // Scope for lock.
    {
        VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);

        VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();

        // Check the magic values around the allocation before releasing it.
        if(IsCorruptionDetectionEnabled())
        {
            VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
            VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
        }

        // A persistently mapped allocation holds one map reference on its block.
        if(hAllocation->IsPersistentMap())
        {
            pBlock->Unmap(m_hAllocator, 1);
        }

        pBlock->m_pMetadata->Free(hAllocation);
        VMA_HEAVY_ASSERT(pBlock->Validate());

        VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);

        // pBlock became empty after this deallocation.
        if(pBlock->m_pMetadata->IsEmpty())
        {
            // Already has empty Allocation. We don't want to have two, so delete this one.
            if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
            {
                pBlockToDelete = pBlock;
                Remove(pBlock);
            }
            // We now have first empty block.
            else
            {
                m_HasEmptyBlock = true;
            }
        }
        // pBlock didn't become empty, but we have another empty block - find and free that one.
        // (This is optional, heuristics.)
        // NOTE(review): only the last block is checked here - presumably because
        // IncrementallySortBlocks tends to move the emptiest block to the back.
        else if(m_HasEmptyBlock)
        {
            VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
            if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
            {
                pBlockToDelete = pLastBlock;
                m_Blocks.pop_back();
                m_HasEmptyBlock = false;
            }
        }

        // One bubble-sort step keeps blocks roughly ordered by free space.
        IncrementallySortBlocks();
    }

    // Destruction of a free Allocation. Deferred until this point, outside of mutex
    // lock, for performance reason.
    if(pBlockToDelete != VMA_NULL)
    {
        VMA_DEBUG_LOG(" Deleted empty allocation");
        pBlockToDelete->Destroy(m_hAllocator);
        vma_delete(m_hAllocator, pBlockToDelete);
    }
}
    12027 
    12028 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
    12029 {
    12030  VkDeviceSize result = 0;
    12031  for(size_t i = m_Blocks.size(); i--; )
    12032  {
    12033  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
    12034  if(result >= m_PreferredBlockSize)
    12035  {
    12036  break;
    12037  }
    12038  }
    12039  return result;
    12040 }
    12041 
    12042 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
    12043 {
    12044  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    12045  {
    12046  if(m_Blocks[blockIndex] == pBlock)
    12047  {
    12048  VmaVectorRemove(m_Blocks, blockIndex);
    12049  return;
    12050  }
    12051  }
    12052  VMA_ASSERT(0);
    12053 }
    12054 
    12055 void VmaBlockVector::IncrementallySortBlocks()
    12056 {
    12057  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    12058  {
    12059  // Bubble sort only until first swap.
    12060  for(size_t i = 1; i < m_Blocks.size(); ++i)
    12061  {
    12062  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
    12063  {
    12064  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
    12065  return;
    12066  }
    12067  }
    12068  }
    12069 }
    12070 
// Tries to allocate from the single given block without making any other
// allocation lost. On success constructs and initializes *pAllocation and
// returns VK_SUCCESS; otherwise returns VK_ERROR_OUT_OF_DEVICE_MEMORY
// (or the Map() error when persistent mapping fails).
VkResult VmaBlockVector::AllocateFromBlock(
    VmaDeviceMemoryBlock* pBlock,
    uint32_t currentFrameIndex,
    VkDeviceSize size,
    VkDeviceSize alignment,
    VmaAllocationCreateFlags allocFlags,
    void* pUserData,
    VmaSuballocationType suballocType,
    uint32_t strategy,
    VmaAllocation* pAllocation)
{
    // Making other allocations lost is handled by the caller (AllocatePage), never here.
    VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
    const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;

    VmaAllocationRequest currRequest = {};
    // Ask the block's metadata for a free region satisfying size/alignment.
    if(pBlock->m_pMetadata->CreateAllocationRequest(
        currentFrameIndex,
        m_FrameInUseCount,
        m_BufferImageGranularity,
        size,
        alignment,
        isUpperAddress,
        suballocType,
        false, // canMakeOtherLost
        strategy,
        &currRequest))
    {
        // Allocate from pCurrBlock.
        VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);

        // A persistently mapped allocation adds one map reference to its block.
        if(mapped)
        {
            VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
            if(res != VK_SUCCESS)
            {
                return res;
            }
        }

        // We no longer have an empty Allocation.
        if(pBlock->m_pMetadata->IsEmpty())
        {
            m_HasEmptyBlock = false;
        }

        // Construct the allocation object, commit the region in metadata,
        // then initialize the handle's block-allocation state.
        *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate();
        (*pAllocation)->Ctor(currentFrameIndex, isUserDataString);
        pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, *pAllocation);
        (*pAllocation)->InitBlockAllocation(
            pBlock,
            currRequest.offset,
            alignment,
            size,
            suballocType,
            mapped,
            (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
        VMA_HEAVY_ASSERT(pBlock->Validate());
        (*pAllocation)->SetUserData(m_hAllocator, pUserData);
        // Optional debug fill of newly created allocations.
        if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
        {
            m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
        }
        // Optional corruption-detection magic values around the allocation.
        if(IsCorruptionDetectionEnabled())
        {
            VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
            VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
        }
        return VK_SUCCESS;
    }
    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}
    12144 
// Allocates a new VkDeviceMemory of blockSize for this vector's memory type,
// wraps it in a VmaDeviceMemoryBlock, appends it to m_Blocks, and optionally
// reports its index via *pNewBlockIndex. Returns the vkAllocateMemory error
// (negative VkResult) on failure.
VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
{
    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
    allocInfo.allocationSize = blockSize;
    VkDeviceMemory mem = VK_NULL_HANDLE;
    VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
    if(res < 0)
    {
        return res;
    }

    // New VkDeviceMemory successfully created.

    // Create new Allocation for it.
    VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
    pBlock->Init(
        m_hAllocator,
        m_hParentPool,
        m_MemoryTypeIndex,
        mem,
        allocInfo.allocationSize,
        m_NextBlockId++, // each block gets a unique, monotonically increasing id
        m_Algorithm);

    m_Blocks.push_back(pBlock);
    if(pNewBlockIndex != VMA_NULL)
    {
        *pNewBlockIndex = m_Blocks.size() - 1;
    }

    return VK_SUCCESS;
}
    12178 
// Executes the given defragmentation moves on the CPU: maps every involved
// block (if not already mapped), copies data with memmove, and handles
// invalidate/flush for non-coherent memory. Blocks mapped only for this
// operation are unmapped again at the end. Errors are reported through
// pDefragCtx->res; once it is not VK_SUCCESS no further mapping/copying happens.
void VmaBlockVector::ApplyDefragmentationMovesCpu(
    class VmaBlockVectorDefragmentationContext* pDefragCtx,
    const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves)
{
    const size_t blockCount = m_Blocks.size();
    const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex);

    enum BLOCK_FLAG
    {
        BLOCK_FLAG_USED = 0x00000001,                      // block is source or destination of some move
        BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002, // we mapped it here and must unmap it
    };

    // Per-block scratch state for this operation.
    struct BlockInfo
    {
        uint32_t flags;
        void* pMappedData;
    };
    VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> >
        blockInfo(blockCount, VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks()));
    memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo));

    // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
    const size_t moveCount = moves.size();
    for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
    {
        const VmaDefragmentationMove& move = moves[moveIndex];
        blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED;
        blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED;
    }

    VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);

    // Go over all blocks. Get mapped pointer or map if necessary.
    for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo& currBlockInfo = blockInfo[blockIndex];
        VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
        if((currBlockInfo.flags & BLOCK_FLAG_USED) != 0)
        {
            currBlockInfo.pMappedData = pBlock->GetMappedData();
            // It is not originally mapped - map it.
            if(currBlockInfo.pMappedData == VMA_NULL)
            {
                pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData);
                if(pDefragCtx->res == VK_SUCCESS)
                {
                    // Remember to unmap this block when done.
                    currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION;
                }
            }
        }
    }

    // Go over all moves. Do actual data transfer.
    if(pDefragCtx->res == VK_SUCCESS)
    {
        const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
        VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };

        for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
        {
            const VmaDefragmentationMove& move = moves[moveIndex];

            const BlockInfo& srcBlockInfo = blockInfo[move.srcBlockIndex];
            const BlockInfo& dstBlockInfo = blockInfo[move.dstBlockIndex];

            VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData);

            // Invalidate source.
            // Range is aligned to nonCoherentAtomSize and clamped to block size,
            // as required for VkMappedMemoryRange.
            if(isNonCoherent)
            {
                VmaDeviceMemoryBlock* const pSrcBlock = m_Blocks[move.srcBlockIndex];
                memRange.memory = pSrcBlock->GetDeviceMemory();
                memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize);
                memRange.size = VMA_MIN(
                    VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize),
                    pSrcBlock->m_pMetadata->GetSize() - memRange.offset);
                (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
            }

            // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
            // memmove because source and destination regions may overlap
            // when moving within the same block.
            memmove(
                reinterpret_cast<char*>(dstBlockInfo.pMappedData) + move.dstOffset,
                reinterpret_cast<char*>(srcBlockInfo.pMappedData) + move.srcOffset,
                static_cast<size_t>(move.size));

            // Re-write the corruption-detection magic values around the new location.
            if(IsCorruptionDetectionEnabled())
            {
                VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset - VMA_DEBUG_MARGIN);
                VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size);
            }

            // Flush destination.
            if(isNonCoherent)
            {
                VmaDeviceMemoryBlock* const pDstBlock = m_Blocks[move.dstBlockIndex];
                memRange.memory = pDstBlock->GetDeviceMemory();
                memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize);
                memRange.size = VMA_MIN(
                    VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize),
                    pDstBlock->m_pMetadata->GetSize() - memRange.offset);
                (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
            }
        }
    }

    // Go over all blocks in reverse order. Unmap those that were mapped just for defragmentation.
    // Regardless of pCtx->res == VK_SUCCESS.
    for(size_t blockIndex = blockCount; blockIndex--; )
    {
        const BlockInfo& currBlockInfo = blockInfo[blockIndex];
        if((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0)
        {
            VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
            pBlock->Unmap(m_hAllocator, 1);
        }
    }
}
    12297 
    12298 void VmaBlockVector::ApplyDefragmentationMovesGpu(
    12299  class VmaBlockVectorDefragmentationContext* pDefragCtx,
    12300  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    12301  VkCommandBuffer commandBuffer)
    12302 {
    12303  const size_t blockCount = m_Blocks.size();
    12304 
    12305  pDefragCtx->blockContexts.resize(blockCount);
    12306  memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext));
    12307 
    12308  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
    12309  const size_t moveCount = moves.size();
    12310  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
    12311  {
    12312  const VmaDefragmentationMove& move = moves[moveIndex];
    12313  pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
    12314  pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
    12315  }
    12316 
    12317  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
    12318 
    12319  // Go over all blocks. Create and bind buffer for whole block if necessary.
    12320  {
    12321  VkBufferCreateInfo bufCreateInfo;
    12322  VmaFillGpuDefragmentationBufferCreateInfo(bufCreateInfo);
    12323 
    12324  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
    12325  {
    12326  VmaBlockDefragmentationContext& currBlockCtx = pDefragCtx->blockContexts[blockIndex];
    12327  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
    12328  if((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0)
    12329  {
    12330  bufCreateInfo.size = pBlock->m_pMetadata->GetSize();
    12331  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)(
    12332  m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer);
    12333  if(pDefragCtx->res == VK_SUCCESS)
    12334  {
    12335  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)(
    12336  m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0);
    12337  }
    12338  }
    12339  }
    12340  }
    12341 
    12342  // Go over all moves. Post data transfer commands to command buffer.
    12343  if(pDefragCtx->res == VK_SUCCESS)
    12344  {
    12345  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
    12346  {
    12347  const VmaDefragmentationMove& move = moves[moveIndex];
    12348 
    12349  const VmaBlockDefragmentationContext& srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex];
    12350  const VmaBlockDefragmentationContext& dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex];
    12351 
    12352  VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer);
    12353 
    12354  VkBufferCopy region = {
    12355  move.srcOffset,
    12356  move.dstOffset,
    12357  move.size };
    12358  (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)(
    12359  commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, &region);
    12360  }
    12361  }
    12362 
    12363  // Save buffers to defrag context for later destruction.
    12364  if(pDefragCtx->res == VK_SUCCESS && moveCount > 0)
    12365  {
    12366  pDefragCtx->res = VK_NOT_READY;
    12367  }
    12368 }
    12369 
    12370 void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats)
    12371 {
    12372  m_HasEmptyBlock = false;
    12373  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    12374  {
    12375  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
    12376  if(pBlock->m_pMetadata->IsEmpty())
    12377  {
    12378  if(m_Blocks.size() > m_MinBlockCount)
    12379  {
    12380  if(pDefragmentationStats != VMA_NULL)
    12381  {
    12382  ++pDefragmentationStats->deviceMemoryBlocksFreed;
    12383  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
    12384  }
    12385 
    12386  VmaVectorRemove(m_Blocks, blockIndex);
    12387  pBlock->Destroy(m_hAllocator);
    12388  vma_delete(m_hAllocator, pBlock);
    12389  }
    12390  else
    12391  {
    12392  m_HasEmptyBlock = true;
    12393  }
    12394  }
    12395  }
    12396 }
    12397 
    12398 #if VMA_STATS_STRING_ENABLED
    12399 
// Writes a JSON description of this block vector: pool configuration (only for
// custom pools) plus a detailed map of every block, keyed by block id.
void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
{
    // Read lock is sufficient: this only inspects state.
    VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);

    json.BeginObject();

    if(m_IsCustomPool)
    {
        // Custom pool: dump its full configuration.
        json.WriteString("MemoryTypeIndex");
        json.WriteNumber(m_MemoryTypeIndex);

        json.WriteString("BlockSize");
        json.WriteNumber(m_PreferredBlockSize);

        // Min/Max are written only when they constrain anything.
        json.WriteString("BlockCount");
        json.BeginObject(true); // true = single-line object.
        if(m_MinBlockCount > 0)
        {
            json.WriteString("Min");
            json.WriteNumber((uint64_t)m_MinBlockCount);
        }
        if(m_MaxBlockCount < SIZE_MAX)
        {
            json.WriteString("Max");
            json.WriteNumber((uint64_t)m_MaxBlockCount);
        }
        json.WriteString("Cur");
        json.WriteNumber((uint64_t)m_Blocks.size());
        json.EndObject();

        if(m_FrameInUseCount > 0)
        {
            json.WriteString("FrameInUseCount");
            json.WriteNumber(m_FrameInUseCount);
        }

        // 0 means the default algorithm; only non-default is reported.
        if(m_Algorithm != 0)
        {
            json.WriteString("Algorithm");
            json.WriteString(VmaAlgorithmToStr(m_Algorithm));
        }
    }
    else
    {
        // Default (non-custom) pool: only the preferred block size is relevant.
        json.WriteString("PreferredBlockSize");
        json.WriteNumber(m_PreferredBlockSize);
    }

    // Per-block detailed maps, keyed by each block's numeric id.
    json.WriteString("Blocks");
    json.BeginObject();
    for(size_t i = 0; i < m_Blocks.size(); ++i)
    {
        json.BeginString();
        json.ContinueString(m_Blocks[i]->GetId());
        json.EndString();

        m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
    }
    json.EndObject();

    json.EndObject();
}
    12462 
    12463 #endif // #if VMA_STATS_STRING_ENABLED
    12464 
// Runs defragmentation for this block vector, choosing between the CPU path
// (memmove on host-visible memory) and the GPU path (vkCmdCopyBuffer recorded
// into commandBuffer). The max*ToMove parameters are in-out global budgets:
// the amount consumed by this vector is subtracted from them.
// Result is stored in pCtx->res; on success the write mutex may remain locked
// (pCtx->mutexLocked) until DefragmentationEnd().
void VmaBlockVector::Defragment(
    class VmaBlockVectorDefragmentationContext* pCtx,
    VmaDefragmentationStats* pStats,
    VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
    VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
    VkCommandBuffer commandBuffer)
{
    pCtx->res = VK_SUCCESS;

    const VkMemoryPropertyFlags memPropFlags =
        m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags;
    const bool isHostVisible = (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;

    // CPU path requires host-visible memory and remaining CPU budget.
    const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 &&
        isHostVisible;
    // GPU path requires remaining GPU budget, no corruption-detection margins
    // (magic values could not be validated after a GPU copy), and this memory
    // type enabled in the GPU-defragmentation mask.
    const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0 &&
        !IsCorruptionDetectionEnabled() &&
        ((1u << m_MemoryTypeIndex) & m_hAllocator->GetGpuDefragmentationMemoryTypeBits()) != 0;

    // There are options to defragment this memory type.
    if(canDefragmentOnCpu || canDefragmentOnGpu)
    {
        bool defragmentOnGpu;
        // There is only one option to defragment this memory type.
        if(canDefragmentOnGpu != canDefragmentOnCpu)
        {
            defragmentOnGpu = canDefragmentOnGpu;
        }
        // Both options are available: Heuristics to choose the best one.
        else
        {
            // Prefer GPU for device-local memory or on integrated GPUs.
            defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 ||
                m_hAllocator->IsIntegratedGpu();
        }

        // CPU memmove handles overlapping ranges; vkCmdCopyBuffer does not.
        bool overlappingMoveSupported = !defragmentOnGpu;

        // Take the write lock; it is released later in DefragmentationEnd().
        if(m_hAllocator->m_UseMutex)
        {
            m_Mutex.LockWrite();
            pCtx->mutexLocked = true;
        }

        pCtx->Begin(overlappingMoveSupported);

        // Defragment.

        // Use the budget of the chosen path.
        const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove;
        const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove;
        VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > moves =
            VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >(VmaStlAllocator<VmaDefragmentationMove>(m_hAllocator->GetAllocationCallbacks()));
        pCtx->res = pCtx->GetAlgorithm()->Defragment(moves, maxBytesToMove, maxAllocationsToMove);

        // Accumulate statistics.
        if(pStats != VMA_NULL)
        {
            const VkDeviceSize bytesMoved = pCtx->GetAlgorithm()->GetBytesMoved();
            const uint32_t allocationsMoved = pCtx->GetAlgorithm()->GetAllocationsMoved();
            pStats->bytesMoved += bytesMoved;
            pStats->allocationsMoved += allocationsMoved;
            VMA_ASSERT(bytesMoved <= maxBytesToMove);
            VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
            // Charge the consumed amount against the caller's global budget.
            if(defragmentOnGpu)
            {
                maxGpuBytesToMove -= bytesMoved;
                maxGpuAllocationsToMove -= allocationsMoved;
            }
            else
            {
                maxCpuBytesToMove -= bytesMoved;
                maxCpuAllocationsToMove -= allocationsMoved;
            }
        }

        // Apply the computed moves: actually transfer the data.
        if(pCtx->res >= VK_SUCCESS)
        {
            if(defragmentOnGpu)
            {
                ApplyDefragmentationMovesGpu(pCtx, moves, commandBuffer);
            }
            else
            {
                ApplyDefragmentationMovesCpu(pCtx, moves);
            }
        }
    }
}
    12552 
    12553 void VmaBlockVector::DefragmentationEnd(
    12554  class VmaBlockVectorDefragmentationContext* pCtx,
    12555  VmaDefragmentationStats* pStats)
    12556 {
    12557  // Destroy buffers.
    12558  for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--; )
    12559  {
    12560  VmaBlockDefragmentationContext& blockCtx = pCtx->blockContexts[blockIndex];
    12561  if(blockCtx.hBuffer)
    12562  {
    12563  (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(
    12564  m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
    12565  }
    12566  }
    12567 
    12568  if(pCtx->res >= VK_SUCCESS)
    12569  {
    12570  FreeEmptyBlocks(pStats);
    12571  }
    12572 
    12573  if(pCtx->mutexLocked)
    12574  {
    12575  VMA_ASSERT(m_hAllocator->m_UseMutex);
    12576  m_Mutex.UnlockWrite();
    12577  }
    12578 }
    12579 
    12580 size_t VmaBlockVector::CalcAllocationCount() const
    12581 {
    12582  size_t result = 0;
    12583  for(size_t i = 0; i < m_Blocks.size(); ++i)
    12584  {
    12585  result += m_Blocks[i]->m_pMetadata->GetAllocationCount();
    12586  }
    12587  return result;
    12588 }
    12589 
    12590 bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const
    12591 {
    12592  if(m_BufferImageGranularity == 1)
    12593  {
    12594  return false;
    12595  }
    12596  VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE;
    12597  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
    12598  {
    12599  VmaDeviceMemoryBlock* const pBlock = m_Blocks[i];
    12600  VMA_ASSERT(m_Algorithm == 0);
    12601  VmaBlockMetadata_Generic* const pMetadata = (VmaBlockMetadata_Generic*)pBlock->m_pMetadata;
    12602  if(pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType))
    12603  {
    12604  return true;
    12605  }
    12606  }
    12607  return false;
    12608 }
    12609 
    12610 void VmaBlockVector::MakePoolAllocationsLost(
    12611  uint32_t currentFrameIndex,
    12612  size_t* pLostAllocationCount)
    12613 {
    12614  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
    12615  size_t lostAllocationCount = 0;
    12616  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    12617  {
    12618  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    12619  VMA_ASSERT(pBlock);
    12620  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
    12621  }
    12622  if(pLostAllocationCount != VMA_NULL)
    12623  {
    12624  *pLostAllocationCount = lostAllocationCount;
    12625  }
    12626 }
    12627 
    12628 VkResult VmaBlockVector::CheckCorruption()
    12629 {
    12630  if(!IsCorruptionDetectionEnabled())
    12631  {
    12632  return VK_ERROR_FEATURE_NOT_PRESENT;
    12633  }
    12634 
    12635  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
    12636  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    12637  {
    12638  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    12639  VMA_ASSERT(pBlock);
    12640  VkResult res = pBlock->CheckCorruption(m_hAllocator);
    12641  if(res != VK_SUCCESS)
    12642  {
    12643  return res;
    12644  }
    12645  }
    12646  return VK_SUCCESS;
    12647 }
    12648 
    12649 void VmaBlockVector::AddStats(VmaStats* pStats)
    12650 {
    12651  const uint32_t memTypeIndex = m_MemoryTypeIndex;
    12652  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
    12653 
    12654  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
    12655 
    12656  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    12657  {
    12658  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    12659  VMA_ASSERT(pBlock);
    12660  VMA_HEAVY_ASSERT(pBlock->Validate());
    12661  VmaStatInfo allocationStatInfo;
    12662  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
    12663  VmaAddStatInfo(pStats->total, allocationStatInfo);
    12664  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    12665  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    12666  }
    12667 }
    12668 
    12670 // VmaDefragmentationAlgorithm_Generic members definition
    12671 
    12672 VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic(
    12673  VmaAllocator hAllocator,
    12674  VmaBlockVector* pBlockVector,
    12675  uint32_t currentFrameIndex,
    12676  bool overlappingMoveSupported) :
    12677  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
    12678  m_AllocationCount(0),
    12679  m_AllAllocations(false),
    12680  m_BytesMoved(0),
    12681  m_AllocationsMoved(0),
    12682  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
    12683 {
    12684  // Create block info for each block.
    12685  const size_t blockCount = m_pBlockVector->m_Blocks.size();
    12686  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    12687  {
    12688  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
    12689  pBlockInfo->m_OriginalBlockIndex = blockIndex;
    12690  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
    12691  m_Blocks.push_back(pBlockInfo);
    12692  }
    12693 
    12694  // Sort them by m_pBlock pointer value.
    12695  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
    12696 }
    12697 
    12698 VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic()
    12699 {
    12700  for(size_t i = m_Blocks.size(); i--; )
    12701  {
    12702  vma_delete(m_hAllocator, m_Blocks[i]);
    12703  }
    12704 }
    12705 
    12706 void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
    12707 {
    12708  // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
    12709  if(hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
    12710  {
    12711  VmaDeviceMemoryBlock* pBlock = hAlloc->GetBlock();
    12712  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
    12713  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
    12714  {
    12715  AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged);
    12716  (*it)->m_Allocations.push_back(allocInfo);
    12717  }
    12718  else
    12719  {
    12720  VMA_ASSERT(0);
    12721  }
    12722 
    12723  ++m_AllocationCount;
    12724  }
    12725 }
    12726 
// One pass of the generic defragmentation algorithm: walks allocations from the
// most "source" block/highest offset downwards and tries to re-place each one
// earlier (a preceding block, or a lower offset in the same block). Appends a
// VmaDefragmentationMove per successful relocation and updates metadata
// immediately. Returns VK_SUCCESS both when finished and when a move budget
// (maxBytesToMove / maxAllocationsToMove) is exhausted.
VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
    VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    if(m_Blocks.empty())
    {
        return VK_SUCCESS;
    }

    // This is a choice based on research.
    // Option 1:
    uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
    // Option 2:
    //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT;
    // Option 3:
    //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT;

    size_t srcBlockMinIndex = 0;
    // When FAST_ALGORITHM, move allocations from only last out of blocks that contain non-movable allocations.
    /*
    if(m_AlgorithmFlags & VMA_DEFRAGMENTATION_FAST_ALGORITHM_BIT)
    {
        const size_t blocksWithNonMovableCount = CalcBlocksWithNonMovableCount();
        if(blocksWithNonMovableCount > 0)
        {
            srcBlockMinIndex = blocksWithNonMovableCount - 1;
        }
    }
    */

    // SIZE_MAX is a sentinel meaning "position srcAllocIndex at the end of the
    // current block's allocation list" in the search loop below.
    size_t srcBlockIndex = m_Blocks.size() - 1;
    size_t srcAllocIndex = SIZE_MAX;
    for(;;)
    {
        // 1. Find next allocation to move.
        // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
        // 1.2. Then start from last to first m_Allocations.
        while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
        {
            if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
            {
                // Finished: no more allocations to process.
                if(srcBlockIndex == srcBlockMinIndex)
                {
                    return VK_SUCCESS;
                }
                else
                {
                    --srcBlockIndex;
                    srcAllocIndex = SIZE_MAX;
                }
            }
            else
            {
                // Snap the sentinel (or stale index) to the last valid position.
                srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
            }
        }

        BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
        AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];

        const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
        const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
        const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
        const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();

        // 2. Try to find new place for this allocation in preceding or current block.
        for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
        {
            BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
            VmaAllocationRequest dstAllocRequest;
            // Accept the candidate only if a fitting free region exists AND the
            // move actually brings the allocation forward (MoveMakesSense).
            if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
                m_CurrentFrameIndex,
                m_pBlockVector->GetFrameInUseCount(),
                m_pBlockVector->GetBufferImageGranularity(),
                size,
                alignment,
                false, // upperAddress
                suballocType,
                false, // canMakeOtherLost
                strategy,
                &dstAllocRequest) &&
            MoveMakesSense(
                dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
            {
                // canMakeOtherLost == false, so no allocations may be sacrificed.
                VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);

                // Reached limit on number of allocations or bytes to move.
                if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
                    (m_BytesMoved + size > maxBytesToMove))
                {
                    return VK_SUCCESS;
                }

                // Record the move for the caller (actual data copy happens later).
                VmaDefragmentationMove move;
                move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex;
                move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex;
                move.srcOffset = srcOffset;
                move.dstOffset = dstAllocRequest.offset;
                move.size = size;
                moves.push_back(move);

                // Update metadata now: allocate at destination, free at source.
                pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
                    dstAllocRequest,
                    suballocType,
                    size,
                    allocInfo.m_hAllocation);
                pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);

                allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);

                // Notify the caller-provided flag, if one was registered.
                if(allocInfo.m_pChanged != VMA_NULL)
                {
                    *allocInfo.m_pChanged = VK_TRUE;
                }

                ++m_AllocationsMoved;
                m_BytesMoved += size;

                VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);

                break;
            }
        }

        // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.

        // Advance to the previous allocation / previous block.
        if(srcAllocIndex > 0)
        {
            --srcAllocIndex;
        }
        else
        {
            if(srcBlockIndex > 0)
            {
                --srcBlockIndex;
                srcAllocIndex = SIZE_MAX;
            }
            else
            {
                return VK_SUCCESS;
            }
        }
    }
}
    12873 
    12874 size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const
    12875 {
    12876  size_t result = 0;
    12877  for(size_t i = 0; i < m_Blocks.size(); ++i)
    12878  {
    12879  if(m_Blocks[i]->m_HasNonMovableAllocations)
    12880  {
    12881  ++result;
    12882  }
    12883  }
    12884  return result;
    12885 }
    12886 
// Entry point of the generic algorithm: gathers candidate allocations, sorts
// blocks, and runs up to two DefragmentRound() passes, appending planned moves
// to `moves`. Budgets are forwarded to each round.
VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
    VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    // Nothing registered via AddAllocation() and "all allocations" mode not requested.
    if(!m_AllAllocations && m_AllocationCount == 0)
    {
        return VK_SUCCESS;
    }

    const size_t blockCount = m_Blocks.size();
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = m_Blocks[blockIndex];

        if(m_AllAllocations)
        {
            // "Defragment everything" mode: collect every used suballocation
            // directly from the block's metadata (no per-allocation pChanged flags).
            // NOTE(review): the cast assumes the generic metadata layout.
            VmaBlockMetadata_Generic* pMetadata = (VmaBlockMetadata_Generic*)pBlockInfo->m_pBlock->m_pMetadata;
            for(VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin();
                it != pMetadata->m_Suballocations.end();
                ++it)
            {
                if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
                {
                    AllocationInfo allocInfo = AllocationInfo(it->hAllocation, VMA_NULL);
                    pBlockInfo->m_Allocations.push_back(allocInfo);
                }
            }
        }

        pBlockInfo->CalcHasNonMovableAllocations();

        // This is a choice based on research.
        // Option 1:
        pBlockInfo->SortAllocationsByOffsetDescending();
        // Option 2:
        //pBlockInfo->SortAllocationsBySizeDescending();
    }

    // Sort m_Blocks this time by the main criterium, from most "destination" to most "source" blocks.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());

    // This is a choice based on research.
    const uint32_t roundCount = 2;

    // Execute defragmentation rounds (the main part).
    VkResult result = VK_SUCCESS;
    for(uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round)
    {
        result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove);
    }

    return result;
}
    12941 
    12942 bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense(
    12943  size_t dstBlockIndex, VkDeviceSize dstOffset,
    12944  size_t srcBlockIndex, VkDeviceSize srcOffset)
    12945 {
    12946  if(dstBlockIndex < srcBlockIndex)
    12947  {
    12948  return true;
    12949  }
    12950  if(dstBlockIndex > srcBlockIndex)
    12951  {
    12952  return false;
    12953  }
    12954  if(dstOffset < srcOffset)
    12955  {
    12956  return true;
    12957  }
    12958  return false;
    12959 }
    12960 
    12962 // VmaDefragmentationAlgorithm_Fast
    12963 
VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast(
    VmaAllocator hAllocator,
    VmaBlockVector* pBlockVector,
    uint32_t currentFrameIndex,
    bool overlappingMoveSupported) :
    VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
    m_OverlappingMoveSupported(overlappingMoveSupported),
    m_AllocationCount(0),
    m_AllAllocations(false),
    m_BytesMoved(0),
    m_AllocationsMoved(0),
    m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks()))
{
    // The fast algorithm packs suballocations back-to-back and therefore
    // cannot operate when a debug margin is configured between allocations.
    VMA_ASSERT(VMA_DEBUG_MARGIN == 0);

}
    12980 
VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast()
{
    // Nothing to release explicitly: m_BlockInfos owns its storage and the
    // memory blocks themselves belong to the block vector, not this object.
}
    12984 
    12985 VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
    12986  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    12987  VkDeviceSize maxBytesToMove,
    12988  uint32_t maxAllocationsToMove)
    12989 {
    12990  VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount);
    12991 
    12992  const size_t blockCount = m_pBlockVector->GetBlockCount();
    12993  if(blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0)
    12994  {
    12995  return VK_SUCCESS;
    12996  }
    12997 
    12998  PreprocessMetadata();
    12999 
    13000  // Sort blocks in order from most destination.
    13001 
    13002  m_BlockInfos.resize(blockCount);
    13003  for(size_t i = 0; i < blockCount; ++i)
    13004  {
    13005  m_BlockInfos[i].origBlockIndex = i;
    13006  }
    13007 
    13008  VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo& lhs, const BlockInfo& rhs) -> bool {
    13009  return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() <
    13010  m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize();
    13011  });
    13012 
    13013  // THE MAIN ALGORITHM
    13014 
    13015  FreeSpaceDatabase freeSpaceDb;
    13016 
    13017  size_t dstBlockInfoIndex = 0;
    13018  size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
    13019  VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
    13020  VmaBlockMetadata_Generic* pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
    13021  VkDeviceSize dstBlockSize = pDstMetadata->GetSize();
    13022  VkDeviceSize dstOffset = 0;
    13023 
    13024  bool end = false;
    13025  for(size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex)
    13026  {
    13027  const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex;
    13028  VmaDeviceMemoryBlock* const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex);
    13029  VmaBlockMetadata_Generic* const pSrcMetadata = (VmaBlockMetadata_Generic*)pSrcBlock->m_pMetadata;
    13030  for(VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin();
    13031  !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); )
    13032  {
    13033  VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation;
    13034  const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment();
    13035  const VkDeviceSize srcAllocSize = srcSuballocIt->size;
    13036  if(m_AllocationsMoved == maxAllocationsToMove ||
    13037  m_BytesMoved + srcAllocSize > maxBytesToMove)
    13038  {
    13039  end = true;
    13040  break;
    13041  }
    13042  const VkDeviceSize srcAllocOffset = srcSuballocIt->offset;
    13043 
    13044  // Try to place it in one of free spaces from the database.
    13045  size_t freeSpaceInfoIndex;
    13046  VkDeviceSize dstAllocOffset;
    13047  if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize,
    13048  freeSpaceInfoIndex, dstAllocOffset))
    13049  {
    13050  size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
    13051  VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
    13052  VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata;
    13053 
    13054  // Same block
    13055  if(freeSpaceInfoIndex == srcBlockInfoIndex)
    13056  {
    13057  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
    13058 
    13059  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
    13060 
    13061  VmaSuballocation suballoc = *srcSuballocIt;
    13062  suballoc.offset = dstAllocOffset;
    13063  suballoc.hAllocation->ChangeOffset(dstAllocOffset);
    13064  m_BytesMoved += srcAllocSize;
    13065  ++m_AllocationsMoved;
    13066 
    13067  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
    13068  ++nextSuballocIt;
    13069  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
    13070  srcSuballocIt = nextSuballocIt;
    13071 
    13072  InsertSuballoc(pFreeSpaceMetadata, suballoc);
    13073 
    13074  VmaDefragmentationMove move = {
    13075  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
    13076  srcAllocOffset, dstAllocOffset,
    13077  srcAllocSize };
    13078  moves.push_back(move);
    13079  }
    13080  // Different block
    13081  else
    13082  {
    13083  // MOVE OPTION 2: Move the allocation to a different block.
    13084 
    13085  VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex);
    13086 
    13087  VmaSuballocation suballoc = *srcSuballocIt;
    13088  suballoc.offset = dstAllocOffset;
    13089  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset);
    13090  m_BytesMoved += srcAllocSize;
    13091  ++m_AllocationsMoved;
    13092 
    13093  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
    13094  ++nextSuballocIt;
    13095  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
    13096  srcSuballocIt = nextSuballocIt;
    13097 
    13098  InsertSuballoc(pFreeSpaceMetadata, suballoc);
    13099 
    13100  VmaDefragmentationMove move = {
    13101  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
    13102  srcAllocOffset, dstAllocOffset,
    13103  srcAllocSize };
    13104  moves.push_back(move);
    13105  }
    13106  }
    13107  else
    13108  {
    13109  dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment);
    13110 
    13111  // If the allocation doesn't fit before the end of dstBlock, forward to next block.
    13112  while(dstBlockInfoIndex < srcBlockInfoIndex &&
    13113  dstAllocOffset + srcAllocSize > dstBlockSize)
    13114  {
    13115  // But before that, register remaining free space at the end of dst block.
    13116  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset);
    13117 
    13118  ++dstBlockInfoIndex;
    13119  dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
    13120  pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
    13121  pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
    13122  dstBlockSize = pDstMetadata->GetSize();
    13123  dstOffset = 0;
    13124  dstAllocOffset = 0;
    13125  }
    13126 
    13127  // Same block
    13128  if(dstBlockInfoIndex == srcBlockInfoIndex)
    13129  {
    13130  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
    13131 
    13132  const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset;
    13133 
    13134  bool skipOver = overlap;
    13135  if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset)
    13136  {
    13137  // If destination and source place overlap, skip if it would move it
    13138  // by only < 1/64 of its size.
    13139  skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize;
    13140  }
    13141 
    13142  if(skipOver)
    13143  {
    13144  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset);
    13145 
    13146  dstOffset = srcAllocOffset + srcAllocSize;
    13147  ++srcSuballocIt;
    13148  }
    13149  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
    13150  else
    13151  {
    13152  srcSuballocIt->offset = dstAllocOffset;
    13153  srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset);
    13154  dstOffset = dstAllocOffset + srcAllocSize;
    13155  m_BytesMoved += srcAllocSize;
    13156  ++m_AllocationsMoved;
    13157  ++srcSuballocIt;
    13158  VmaDefragmentationMove move = {
    13159  srcOrigBlockIndex, dstOrigBlockIndex,
    13160  srcAllocOffset, dstAllocOffset,
    13161  srcAllocSize };
    13162  moves.push_back(move);
    13163  }
    13164  }
    13165  // Different block
    13166  else
    13167  {
    13168  // MOVE OPTION 2: Move the allocation to a different block.
    13169 
    13170  VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex);
    13171  VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize);
    13172 
    13173  VmaSuballocation suballoc = *srcSuballocIt;
    13174  suballoc.offset = dstAllocOffset;
    13175  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset);
    13176  dstOffset = dstAllocOffset + srcAllocSize;
    13177  m_BytesMoved += srcAllocSize;
    13178  ++m_AllocationsMoved;
    13179 
    13180  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
    13181  ++nextSuballocIt;
    13182  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
    13183  srcSuballocIt = nextSuballocIt;
    13184 
    13185  pDstMetadata->m_Suballocations.push_back(suballoc);
    13186 
    13187  VmaDefragmentationMove move = {
    13188  srcOrigBlockIndex, dstOrigBlockIndex,
    13189  srcAllocOffset, dstAllocOffset,
    13190  srcAllocSize };
    13191  moves.push_back(move);
    13192  }
    13193  }
    13194  }
    13195  }
    13196 
    13197  m_BlockInfos.clear();
    13198 
    13199  PostprocessMetadata();
    13200 
    13201  return VK_SUCCESS;
    13202 }
    13203 
    13204 void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata()
    13205 {
    13206  const size_t blockCount = m_pBlockVector->GetBlockCount();
    13207  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    13208  {
    13209  VmaBlockMetadata_Generic* const pMetadata =
    13210  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
    13211  pMetadata->m_FreeCount = 0;
    13212  pMetadata->m_SumFreeSize = pMetadata->GetSize();
    13213  pMetadata->m_FreeSuballocationsBySize.clear();
    13214  for(VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
    13215  it != pMetadata->m_Suballocations.end(); )
    13216  {
    13217  if(it->type == VMA_SUBALLOCATION_TYPE_FREE)
    13218  {
    13219  VmaSuballocationList::iterator nextIt = it;
    13220  ++nextIt;
    13221  pMetadata->m_Suballocations.erase(it);
    13222  it = nextIt;
    13223  }
    13224  else
    13225  {
    13226  ++it;
    13227  }
    13228  }
    13229  }
    13230 }
    13231 
    13232 void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata()
    13233 {
    13234  const size_t blockCount = m_pBlockVector->GetBlockCount();
    13235  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    13236  {
    13237  VmaBlockMetadata_Generic* const pMetadata =
    13238  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
    13239  const VkDeviceSize blockSize = pMetadata->GetSize();
    13240 
    13241  // No allocations in this block - entire area is free.
    13242  if(pMetadata->m_Suballocations.empty())
    13243  {
    13244  pMetadata->m_FreeCount = 1;
    13245  //pMetadata->m_SumFreeSize is already set to blockSize.
    13246  VmaSuballocation suballoc = {
    13247  0, // offset
    13248  blockSize, // size
    13249  VMA_NULL, // hAllocation
    13250  VMA_SUBALLOCATION_TYPE_FREE };
    13251  pMetadata->m_Suballocations.push_back(suballoc);
    13252  pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin());
    13253  }
    13254  // There are some allocations in this block.
    13255  else
    13256  {
    13257  VkDeviceSize offset = 0;
    13258  VmaSuballocationList::iterator it;
    13259  for(it = pMetadata->m_Suballocations.begin();
    13260  it != pMetadata->m_Suballocations.end();
    13261  ++it)
    13262  {
    13263  VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE);
    13264  VMA_ASSERT(it->offset >= offset);
    13265 
    13266  // Need to insert preceding free space.
    13267  if(it->offset > offset)
    13268  {
    13269  ++pMetadata->m_FreeCount;
    13270  const VkDeviceSize freeSize = it->offset - offset;
    13271  VmaSuballocation suballoc = {
    13272  offset, // offset
    13273  freeSize, // size
    13274  VMA_NULL, // hAllocation
    13275  VMA_SUBALLOCATION_TYPE_FREE };
    13276  VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
    13277  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    13278  {
    13279  pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt);
    13280  }
    13281  }
    13282 
    13283  pMetadata->m_SumFreeSize -= it->size;
    13284  offset = it->offset + it->size;
    13285  }
    13286 
    13287  // Need to insert trailing free space.
    13288  if(offset < blockSize)
    13289  {
    13290  ++pMetadata->m_FreeCount;
    13291  const VkDeviceSize freeSize = blockSize - offset;
    13292  VmaSuballocation suballoc = {
    13293  offset, // offset
    13294  freeSize, // size
    13295  VMA_NULL, // hAllocation
    13296  VMA_SUBALLOCATION_TYPE_FREE };
    13297  VMA_ASSERT(it == pMetadata->m_Suballocations.end());
    13298  VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
    13299  if(freeSize > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    13300  {
    13301  pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt);
    13302  }
    13303  }
    13304 
    13305  VMA_SORT(
    13306  pMetadata->m_FreeSuballocationsBySize.begin(),
    13307  pMetadata->m_FreeSuballocationsBySize.end(),
    13308  VmaSuballocationItemSizeLess());
    13309  }
    13310 
    13311  VMA_HEAVY_ASSERT(pMetadata->Validate());
    13312  }
    13313 }
    13314 
    13315 void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc)
    13316 {
    13317  // TODO: Optimize somehow. Remember iterator instead of searching for it linearly.
    13318  VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
    13319  while(it != pMetadata->m_Suballocations.end())
    13320  {
    13321  if(it->offset < suballoc.offset)
    13322  {
    13323  ++it;
    13324  }
    13325  }
    13326  pMetadata->m_Suballocations.insert(it, suballoc);
    13327 }
    13328 
    13330 // VmaBlockVectorDefragmentationContext
    13331 
// Per-block-vector defragmentation state. hCustomPool is VMA_NULL when this
// context serves one of the allocator's default pools.
VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
    VmaAllocator hAllocator,
    VmaPool hCustomPool,
    VmaBlockVector* pBlockVector,
    uint32_t currFrameIndex) :
    res(VK_SUCCESS),
    mutexLocked(false),
    blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
    m_hAllocator(hAllocator),
    m_hCustomPool(hCustomPool),
    m_pBlockVector(pBlockVector),
    m_CurrFrameIndex(currFrameIndex),
    m_pAlgorithm(VMA_NULL), // Created later, in Begin().
    m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
    m_AllAllocations(false)
{
}
    13349 
VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext()
{
    // Destroys the algorithm object created in Begin(). vma_delete on VMA_NULL is a no-op.
    vma_delete(m_hAllocator, m_pAlgorithm);
}
    13354 
    13355 void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
    13356 {
    13357  AllocInfo info = { hAlloc, pChanged };
    13358  m_Allocations.push_back(info);
    13359 }
    13360 
    13361 void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported)
    13362 {
    13363  const bool allAllocations = m_AllAllocations ||
    13364  m_Allocations.size() == m_pBlockVector->CalcAllocationCount();
    13365 
    13366  /********************************
    13367  HERE IS THE CHOICE OF DEFRAGMENTATION ALGORITHM.
    13368  ********************************/
    13369 
    13370  /*
    13371  Fast algorithm is supported only when certain criteria are met:
    13372  - VMA_DEBUG_MARGIN is 0.
    13373  - All allocations in this block vector are moveable.
    13374  - There is no possibility of image/buffer granularity conflict.
    13375  */
    13376  if(VMA_DEBUG_MARGIN == 0 &&
    13377  allAllocations &&
    13378  !m_pBlockVector->IsBufferImageGranularityConflictPossible())
    13379  {
    13380  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)(
    13381  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
    13382  }
    13383  else
    13384  {
    13385  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)(
    13386  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
    13387  }
    13388 
    13389  if(allAllocations)
    13390  {
    13391  m_pAlgorithm->AddAll();
    13392  }
    13393  else
    13394  {
    13395  for(size_t i = 0, count = m_Allocations.size(); i < count; ++i)
    13396  {
    13397  m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged);
    13398  }
    13399  }
    13400 }
    13401 
    13403 // VmaDefragmentationContext
    13404 
// Top-level defragmentation context: owns one VmaBlockVectorDefragmentationContext
// per involved default pool (indexed by memory type) and per involved custom pool.
VmaDefragmentationContext_T::VmaDefragmentationContext_T(
    VmaAllocator hAllocator,
    uint32_t currFrameIndex,
    uint32_t flags,
    VmaDefragmentationStats* pStats) :
    m_hAllocator(hAllocator),
    m_CurrFrameIndex(currFrameIndex),
    m_Flags(flags),
    m_pStats(pStats),
    m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks()))
{
    // Null out the fixed-size array of per-memory-type contexts; entries are
    // created lazily in AddAllocations().
    memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts));
}
    13418 
    13419 VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
    13420 {
    13421  for(size_t i = m_CustomPoolContexts.size(); i--; )
    13422  {
    13423  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
    13424  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
    13425  vma_delete(m_hAllocator, pBlockVectorCtx);
    13426  }
    13427  for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
    13428  {
    13429  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
    13430  if(pBlockVectorCtx)
    13431  {
    13432  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
    13433  vma_delete(m_hAllocator, pBlockVectorCtx);
    13434  }
    13435  }
    13436 }
    13437 
    13438 void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, VmaPool* pPools)
    13439 {
    13440  for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
    13441  {
    13442  VmaPool pool = pPools[poolIndex];
    13443  VMA_ASSERT(pool);
    13444  // Pools with algorithm other than default are not defragmented.
    13445  if(pool->m_BlockVector.GetAlgorithm() == 0)
    13446  {
    13447  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
    13448 
    13449  for(size_t i = m_CustomPoolContexts.size(); i--; )
    13450  {
    13451  if(m_CustomPoolContexts[i]->GetCustomPool() == pool)
    13452  {
    13453  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
    13454  break;
    13455  }
    13456  }
    13457 
    13458  if(!pBlockVectorDefragCtx)
    13459  {
    13460  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
    13461  m_hAllocator,
    13462  pool,
    13463  &pool->m_BlockVector,
    13464  m_CurrFrameIndex);
    13465  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
    13466  }
    13467 
    13468  pBlockVectorDefragCtx->AddAll();
    13469  }
    13470  }
    13471 }
    13472 
    13473 void VmaDefragmentationContext_T::AddAllocations(
    13474  uint32_t allocationCount,
    13475  VmaAllocation* pAllocations,
    13476  VkBool32* pAllocationsChanged)
    13477 {
    13478  // Dispatch pAllocations among defragmentators. Create them when necessary.
    13479  for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    13480  {
    13481  const VmaAllocation hAlloc = pAllocations[allocIndex];
    13482  VMA_ASSERT(hAlloc);
    13483  // DedicatedAlloc cannot be defragmented.
    13484  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
    13485  // Lost allocation cannot be defragmented.
    13486  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
    13487  {
    13488  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
    13489 
    13490  const VmaPool hAllocPool = hAlloc->GetBlock()->GetParentPool();
    13491  // This allocation belongs to custom pool.
    13492  if(hAllocPool != VK_NULL_HANDLE)
    13493  {
    13494  // Pools with algorithm other than default are not defragmented.
    13495  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
    13496  {
    13497  for(size_t i = m_CustomPoolContexts.size(); i--; )
    13498  {
    13499  if(m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool)
    13500  {
    13501  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
    13502  break;
    13503  }
    13504  }
    13505  if(!pBlockVectorDefragCtx)
    13506  {
    13507  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
    13508  m_hAllocator,
    13509  hAllocPool,
    13510  &hAllocPool->m_BlockVector,
    13511  m_CurrFrameIndex);
    13512  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
    13513  }
    13514  }
    13515  }
    13516  // This allocation belongs to default pool.
    13517  else
    13518  {
    13519  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
    13520  pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
    13521  if(!pBlockVectorDefragCtx)
    13522  {
    13523  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
    13524  m_hAllocator,
    13525  VMA_NULL, // hCustomPool
    13526  m_hAllocator->m_pBlockVectors[memTypeIndex],
    13527  m_CurrFrameIndex);
    13528  m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx;
    13529  }
    13530  }
    13531 
    13532  if(pBlockVectorDefragCtx)
    13533  {
    13534  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
    13535  &pAllocationsChanged[allocIndex] : VMA_NULL;
    13536  pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged);
    13537  }
    13538  }
    13539  }
    13540 }
    13541 
    13542 VkResult VmaDefragmentationContext_T::Defragment(
    13543  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
    13544  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
    13545  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats)
    13546 {
    13547  if(pStats)
    13548  {
    13549  memset(pStats, 0, sizeof(VmaDefragmentationStats));
    13550  }
    13551 
    13552  if(commandBuffer == VK_NULL_HANDLE)
    13553  {
    13554  maxGpuBytesToMove = 0;
    13555  maxGpuAllocationsToMove = 0;
    13556  }
    13557 
    13558  VkResult res = VK_SUCCESS;
    13559 
    13560  // Process default pools.
    13561  for(uint32_t memTypeIndex = 0;
    13562  memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS;
    13563  ++memTypeIndex)
    13564  {
    13565  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
    13566  if(pBlockVectorCtx)
    13567  {
    13568  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
    13569  pBlockVectorCtx->GetBlockVector()->Defragment(
    13570  pBlockVectorCtx,
    13571  pStats,
    13572  maxCpuBytesToMove, maxCpuAllocationsToMove,
    13573  maxGpuBytesToMove, maxGpuAllocationsToMove,
    13574  commandBuffer);
    13575  if(pBlockVectorCtx->res != VK_SUCCESS)
    13576  {
    13577  res = pBlockVectorCtx->res;
    13578  }
    13579  }
    13580  }
    13581 
    13582  // Process custom pools.
    13583  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
    13584  customCtxIndex < customCtxCount && res >= VK_SUCCESS;
    13585  ++customCtxIndex)
    13586  {
    13587  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
    13588  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
    13589  pBlockVectorCtx->GetBlockVector()->Defragment(
    13590  pBlockVectorCtx,
    13591  pStats,
    13592  maxCpuBytesToMove, maxCpuAllocationsToMove,
    13593  maxGpuBytesToMove, maxGpuAllocationsToMove,
    13594  commandBuffer);
    13595  if(pBlockVectorCtx->res != VK_SUCCESS)
    13596  {
    13597  res = pBlockVectorCtx->res;
    13598  }
    13599  }
    13600 
    13601  return res;
    13602 }
    13603 
    13605 // VmaRecorder
    13606 
    13607 #if VMA_RECORDING_ENABLED
    13608 
// Records VMA API calls to a CSV file for offline replay.
// m_Freq/m_StartCounter hold INT64_MAX until Init() queries the real
// performance-counter values.
VmaRecorder::VmaRecorder() :
    m_UseMutex(true),
    m_Flags(0),
    m_File(VMA_NULL),
    m_Freq(INT64_MAX),
    m_StartCounter(INT64_MAX)
{
}
    13617 
    13618 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
    13619 {
    13620  m_UseMutex = useMutex;
    13621  m_Flags = settings.flags;
    13622 
    13623  QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
    13624  QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);
    13625 
    13626  // Open file for writing.
    13627  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
    13628  if(err != 0)
    13629  {
    13630  return VK_ERROR_INITIALIZATION_FAILED;
    13631  }
    13632 
    13633  // Write header.
    13634  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
    13635  fprintf(m_File, "%s\n", "1,5");
    13636 
    13637  return VK_SUCCESS;
    13638 }
    13639 
VmaRecorder::~VmaRecorder()
{
    // Close the recording file if Init() succeeded in opening it.
    if(m_File != VMA_NULL)
    {
        fclose(m_File);
    }
}
    13647 
// Logs a vmaCreateAllocator call. Every record is one CSV line:
// threadId,time,frameIndex,functionName[,args...].
void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
    Flush();
}
    13657 
// Logs a vmaDestroyAllocator call as a CSV line.
void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
    Flush();
}
    13667 
// Logs a vmaCreatePool call: pool creation parameters followed by the pool
// handle (printed as a pointer, used as an identifier during replay).
void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
        createInfo.memoryTypeIndex,
        createInfo.flags,
        createInfo.blockSize,
        (uint64_t)createInfo.minBlockCount,
        (uint64_t)createInfo.maxBlockCount,
        createInfo.frameInUseCount,
        pool);
    Flush();
}
    13684 
// Logs a vmaDestroyPool call, identified by the pool handle.
void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
        pool);
    Flush();
}
    13695 
// Logs a vmaAllocateMemory call: memory requirements, allocation-create
// parameters, the resulting allocation handle, and the user-data string.
void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    13720 
// Logs a vmaAllocateMemoryPages call. The resulting allocation handles are
// appended as a space-separated pointer list via PrintPointerList().
void VmaRecorder::RecordAllocateMemoryPages(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    const VmaAllocationCreateInfo& createInfo,
    uint64_t allocationCount,
    const VmaAllocation* pAllocations)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryPages,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool);
    PrintPointerList(allocationCount, pAllocations);
    fprintf(m_File, ",%s\n", userDataStr.GetString());
    Flush();
}
    13746 
// Logs a vmaAllocateMemoryForBuffer call, including the dedicated-allocation
// requirement flags reported by the implementation.
void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    bool requiresDedicatedAllocation,
    bool prefersDedicatedAllocation,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        requiresDedicatedAllocation ? 1 : 0,
        prefersDedicatedAllocation ? 1 : 0,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    13775 
// Logs a vmaAllocateMemoryForImage call, including the dedicated-allocation
// requirement flags reported by the implementation.
void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    bool requiresDedicatedAllocation,
    bool prefersDedicatedAllocation,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        requiresDedicatedAllocation ? 1 : 0,
        prefersDedicatedAllocation ? 1 : 0,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    13804 
// Logs a vmaFreeMemory call, identified by the allocation handle.
void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
    13816 
// Logs a vmaFreeMemoryPages call; the freed allocation handles are written
// as a pointer list via PrintPointerList().
void VmaRecorder::RecordFreeMemoryPages(uint32_t frameIndex,
    uint64_t allocationCount,
    const VmaAllocation* pAllocations)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaFreeMemoryPages,", callParams.threadId, callParams.time, frameIndex);
    PrintPointerList(allocationCount, pAllocations);
    fprintf(m_File, "\n");
    Flush();
}
    13830 
// Logs a vmaResizeAllocation call: allocation handle and its new size.
void VmaRecorder::RecordResizeAllocation(
    uint32_t frameIndex,
    VmaAllocation allocation,
    VkDeviceSize newSize)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaResizeAllocation,%p,%llu\n", callParams.threadId, callParams.time, frameIndex,
        allocation, newSize);
    Flush();
}
    13844 
// Logs a vmaSetAllocationUserData call. The user data is formatted as a string
// when the allocation was created with USER_DATA_COPY_STRING, otherwise as a pointer.
void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
    VmaAllocation allocation,
    const void* pUserData)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(
        allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
        pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        allocation,
        userDataStr.GetString());
    Flush();
}
    13861 
// Logs a vmaCreateLostAllocation call, identified by the allocation handle.
void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
    13873 
// Logs a vmaMapMemory call, identified by the allocation handle.
void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
    13885 
// Logs a vmaUnmapMemory call, identified by the allocation handle.
void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
    13897 
// Logs a vmaFlushAllocation call: allocation handle plus flushed offset and size.
void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
    VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
        allocation,
        offset,
        size);
    Flush();
}
    13911 
// Logs a vmaInvalidateAllocation call: allocation handle plus invalidated offset and size.
void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
    VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
        allocation,
        offset,
        size);
    Flush();
}
    13925 
// Logs a vmaCreateBuffer call: buffer-create and allocation-create parameters,
// the resulting allocation handle, and the user-data string.
void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
    const VkBufferCreateInfo& bufCreateInfo,
    const VmaAllocationCreateInfo& allocCreateInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        bufCreateInfo.flags,
        bufCreateInfo.size,
        bufCreateInfo.usage,
        bufCreateInfo.sharingMode,
        allocCreateInfo.flags,
        allocCreateInfo.usage,
        allocCreateInfo.requiredFlags,
        allocCreateInfo.preferredFlags,
        allocCreateInfo.memoryTypeBits,
        allocCreateInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    13951 
// Logs a vmaCreateImage call: image-create and allocation-create parameters,
// the resulting allocation handle, and the user-data string.
void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
    const VkImageCreateInfo& imageCreateInfo,
    const VmaAllocationCreateInfo& allocCreateInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        imageCreateInfo.flags,
        imageCreateInfo.imageType,
        imageCreateInfo.format,
        imageCreateInfo.extent.width,
        imageCreateInfo.extent.height,
        imageCreateInfo.extent.depth,
        imageCreateInfo.mipLevels,
        imageCreateInfo.arrayLayers,
        imageCreateInfo.samples,
        imageCreateInfo.tiling,
        imageCreateInfo.usage,
        imageCreateInfo.sharingMode,
        imageCreateInfo.initialLayout,
        allocCreateInfo.flags,
        allocCreateInfo.usage,
        allocCreateInfo.requiredFlags,
        allocCreateInfo.preferredFlags,
        allocCreateInfo.memoryTypeBits,
        allocCreateInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    13986 
// Appends a "vmaDestroyBuffer" entry (allocation handle only) to the recording file.
void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams); // Fills calling thread id and timestamp.

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
    13998 
// Appends a "vmaDestroyImage" entry (allocation handle only) to the recording file.
void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams); // Fills calling thread id and timestamp.

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
    14010 
// Appends a "vmaTouchAllocation" entry (allocation handle only) to the recording file.
void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams); // Fills calling thread id and timestamp.

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
    14022 
// Appends a "vmaGetAllocationInfo" entry (allocation handle only) to the recording file.
void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams); // Fills calling thread id and timestamp.

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
    14034 
// Appends a "vmaMakePoolAllocationsLost" entry (pool handle only) to the recording file.
void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
    VmaPool pool)
{
    CallParams callParams;
    GetBasicParams(callParams); // Fills calling thread id and timestamp.

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
        pool);
    Flush();
}
    14046 
    14047 void VmaRecorder::RecordDefragmentationBegin(uint32_t frameIndex,
    14048  const VmaDefragmentationInfo2& info,
    14050 {
    14051  CallParams callParams;
    14052  GetBasicParams(callParams);
    14053 
    14054  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    14055  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationBegin,%u,", callParams.threadId, callParams.time, frameIndex,
    14056  info.flags);
    14057  PrintPointerList(info.allocationCount, info.pAllocations);
    14058  fprintf(m_File, ",");
    14059  PrintPointerList(info.poolCount, info.pPools);
    14060  fprintf(m_File, ",%llu,%u,%llu,%u,%p,%p\n",
    14061  info.maxCpuBytesToMove,
    14063  info.maxGpuBytesToMove,
    14065  info.commandBuffer,
    14066  ctx);
    14067  Flush();
    14068 }
    14069 
    14070 void VmaRecorder::RecordDefragmentationEnd(uint32_t frameIndex,
    14072 {
    14073  CallParams callParams;
    14074  GetBasicParams(callParams);
    14075 
    14076  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    14077  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationEnd,%p\n", callParams.threadId, callParams.time, frameIndex,
    14078  ctx);
    14079  Flush();
    14080 }
    14081 
    14082 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
    14083 {
    14084  if(pUserData != VMA_NULL)
    14085  {
    14086  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
    14087  {
    14088  m_Str = (const char*)pUserData;
    14089  }
    14090  else
    14091  {
    14092  sprintf_s(m_PtrStr, "%p", pUserData);
    14093  m_Str = m_PtrStr;
    14094  }
    14095  }
    14096  else
    14097  {
    14098  m_Str = "";
    14099  }
    14100 }
    14101 
// Writes the "Config,Begin" ... "Config,End" header section of the recording
// file: physical device identity and limits, memory heaps/types, enabled
// extensions, and the compile-time VMA_* macro configuration, so a replay
// tool can validate it runs against a compatible environment.
void VmaRecorder::WriteConfiguration(
    const VkPhysicalDeviceProperties& devProps,
    const VkPhysicalDeviceMemoryProperties& memProps,
    bool dedicatedAllocationExtensionEnabled)
{
    fprintf(m_File, "Config,Begin\n");

    // Device identity.
    fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
    fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
    fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
    fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
    fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
    fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);

    // Limits that affect allocation behavior.
    fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
    fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
    fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);

    // Memory heaps and types.
    fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
    for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
    }
    fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
    for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
    }

    fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);

    // Compile-time configuration of this VMA build.
    fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
    fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
    fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
    fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
    fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);

    fprintf(m_File, "Config,End\n");
}
    14147 
// Fills outParams with the calling thread's id and the elapsed time in
// seconds since the recorder started (performance counter ticks relative to
// m_StartCounter, divided by the counter frequency m_Freq).
// NOTE(review): uses Win32 APIs (GetCurrentThreadId/QueryPerformanceCounter);
// recording appears to be Windows-only in this build.
void VmaRecorder::GetBasicParams(CallParams& outParams)
{
    outParams.threadId = GetCurrentThreadId();

    LARGE_INTEGER counter;
    QueryPerformanceCounter(&counter);
    outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
}
    14156 
    14157 void VmaRecorder::PrintPointerList(uint64_t count, const VmaAllocation* pItems)
    14158 {
    14159  if(count)
    14160  {
    14161  fprintf(m_File, "%p", pItems[0]);
    14162  for(uint64_t i = 1; i < count; ++i)
    14163  {
    14164  fprintf(m_File, " %p", pItems[i]);
    14165  }
    14166  }
    14167 }
    14168 
// Flushes the recording file, but only when the recorder was created with
// VMA_RECORD_FLUSH_AFTER_CALL_BIT - keeps the file usable after a crash at
// the cost of per-call I/O.
void VmaRecorder::Flush()
{
    if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
    {
        fflush(m_File);
    }
}
    14176 
    14177 #endif // #if VMA_RECORDING_ENABLED
    14178 
////////////////////////////////////////////////////////////////////////////////
// VmaAllocationObjectAllocator
    14181 
// Wraps a pool allocator for allocation objects; 1024 is the pool's
// per-block item capacity hint.
VmaAllocationObjectAllocator::VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_Allocator(pAllocationCallbacks, 1024)
{
}
    14186 
// Thread-safely allocates one allocation object from the underlying pool.
VmaAllocation VmaAllocationObjectAllocator::Allocate()
{
    VmaMutexLock mutexLock(m_Mutex);
    return m_Allocator.Alloc();
}
    14192 
// Thread-safely returns an allocation object to the underlying pool.
void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc)
{
    VmaMutexLock mutexLock(m_Mutex);
    m_Allocator.Free(hAlloc);
}
    14198 
////////////////////////////////////////////////////////////////////////////////
// VmaAllocator_T
    14201 
    14202 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
    14203  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
    14204  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
    14205  m_hDevice(pCreateInfo->device),
    14206  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
    14207  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
    14208  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
    14209  m_AllocationObjectAllocator(&m_AllocationCallbacks),
    14210  m_PreferredLargeHeapBlockSize(0),
    14211  m_PhysicalDevice(pCreateInfo->physicalDevice),
    14212  m_CurrentFrameIndex(0),
    14213  m_GpuDefragmentationMemoryTypeBits(UINT32_MAX),
    14214  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
    14215  m_NextPoolId(0)
    14217  ,m_pRecorder(VMA_NULL)
    14218 #endif
    14219 {
    14220  if(VMA_DEBUG_DETECT_CORRUPTION)
    14221  {
    14222  // Needs to be multiply of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
    14223  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
    14224  }
    14225 
    14226  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
    14227 
    14228 #if !(VMA_DEDICATED_ALLOCATION)
    14230  {
    14231  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
    14232  }
    14233 #endif
    14234 
    14235  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
    14236  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
    14237  memset(&m_MemProps, 0, sizeof(m_MemProps));
    14238 
    14239  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
    14240  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
    14241 
    14242  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    14243  {
    14244  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
    14245  }
    14246 
    14247  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
    14248  {
    14249  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
    14250  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
    14251  }
    14252 
    14253  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
    14254 
    14255  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
    14256  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
    14257 
    14258  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
    14259  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
    14260  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
    14261  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
    14262 
    14263  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
    14264  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
    14265 
    14266  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
    14267  {
    14268  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
    14269  {
    14270  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
    14271  if(limit != VK_WHOLE_SIZE)
    14272  {
    14273  m_HeapSizeLimit[heapIndex] = limit;
    14274  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
    14275  {
    14276  m_MemProps.memoryHeaps[heapIndex].size = limit;
    14277  }
    14278  }
    14279  }
    14280  }
    14281 
    14282  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    14283  {
    14284  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
    14285 
    14286  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
    14287  this,
    14288  VK_NULL_HANDLE, // hParentPool
    14289  memTypeIndex,
    14290  preferredBlockSize,
    14291  0,
    14292  SIZE_MAX,
    14293  GetBufferImageGranularity(),
    14294  pCreateInfo->frameInUseCount,
    14295  false, // isCustomPool
    14296  false, // explicitBlockSize
    14297  false); // linearAlgorithm
    14298  // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
    14299  // becase minBlockCount is 0.
    14300  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
    14301 
    14302  }
    14303 }
    14304 
// Second-stage initialization, kept out of the constructor so it can return
// a VkResult. Currently only sets up the optional call recorder.
// Returns VK_ERROR_FEATURE_NOT_PRESENT (after asserting) when recording is
// requested but the library was built without VMA_RECORDING_ENABLED.
VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
{
    VkResult res = VK_SUCCESS;

    // Recording is enabled only when a non-empty output file path is given.
    if(pCreateInfo->pRecordSettings != VMA_NULL &&
        !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
    {
#if VMA_RECORDING_ENABLED
        m_pRecorder = vma_new(this, VmaRecorder)();
        res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
        if(res != VK_SUCCESS)
        {
            return res;
        }
        // Write the config header, then record the creation call itself.
        m_pRecorder->WriteConfiguration(
            m_PhysicalDeviceProperties,
            m_MemProps,
            m_UseKhrDedicatedAllocation);
        m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
#else
        VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
        return VK_ERROR_FEATURE_NOT_PRESENT;
#endif
    }

    return res;
}
    14332 
// Destroys the allocator. The user must have destroyed all custom pools and
// freed all dedicated allocations beforehand; leftovers trigger asserts.
VmaAllocator_T::~VmaAllocator_T()
{
#if VMA_RECORDING_ENABLED
    if(m_pRecorder != VMA_NULL)
    {
        // Record the destruction before tearing the recorder down.
        m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
        vma_delete(this, m_pRecorder);
    }
#endif

    VMA_ASSERT(m_Pools.empty());

    // Tear down per-memory-type structures in reverse index order.
    for(size_t i = GetMemoryTypeCount(); i--; )
    {
        if(m_pDedicatedAllocations[i] != VMA_NULL && !m_pDedicatedAllocations[i]->empty())
        {
            VMA_ASSERT(0 && "Unfreed dedicated allocations found.");
        }

        vma_delete(this, m_pDedicatedAllocations[i]);
        vma_delete(this, m_pBlockVectors[i]);
    }
}
    14356 
// Populates m_VulkanFunctions in three layers:
// 1. When VMA_STATIC_VULKAN_FUNCTIONS == 1, start from the statically linked
//    entry points (dedicated-allocation functions via vkGetDeviceProcAddr).
// 2. Overwrite each pointer with the user-provided one when non-null.
// 3. Assert every required pointer ended up non-null.
void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
{
#if VMA_STATIC_VULKAN_FUNCTIONS == 1
    m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties;
    m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties;
    m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
    m_VulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkFreeMemory;
    m_VulkanFunctions.vkMapMemory = (PFN_vkMapMemory)vkMapMemory;
    m_VulkanFunctions.vkUnmapMemory = (PFN_vkUnmapMemory)vkUnmapMemory;
    m_VulkanFunctions.vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkFlushMappedMemoryRanges;
    m_VulkanFunctions.vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkInvalidateMappedMemoryRanges;
    m_VulkanFunctions.vkBindBufferMemory = (PFN_vkBindBufferMemory)vkBindBufferMemory;
    m_VulkanFunctions.vkBindImageMemory = (PFN_vkBindImageMemory)vkBindImageMemory;
    m_VulkanFunctions.vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetBufferMemoryRequirements;
    m_VulkanFunctions.vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetImageMemoryRequirements;
    m_VulkanFunctions.vkCreateBuffer = (PFN_vkCreateBuffer)vkCreateBuffer;
    m_VulkanFunctions.vkDestroyBuffer = (PFN_vkDestroyBuffer)vkDestroyBuffer;
    m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage;
    m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage;
    m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer;
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        // KHR extension functions are not statically exported - fetch them.
        m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
            (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
        m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
            (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
    }
#endif // #if VMA_DEDICATED_ALLOCATION
#endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1

// Copies one user-supplied pointer over the default when it is non-null.
#define VMA_COPY_IF_NOT_NULL(funcName) \
    if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;

    if(pVulkanFunctions != VMA_NULL)
    {
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
        VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
        VMA_COPY_IF_NOT_NULL(vkFreeMemory);
        VMA_COPY_IF_NOT_NULL(vkMapMemory);
        VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
        VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
        VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
        VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
        VMA_COPY_IF_NOT_NULL(vkCreateImage);
        VMA_COPY_IF_NOT_NULL(vkDestroyImage);
        VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
#if VMA_DEDICATED_ALLOCATION
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
#endif
    }

#undef VMA_COPY_IF_NOT_NULL

    // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
    // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
        VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
    }
#endif
}
    14445 
    14446 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
    14447 {
    14448  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    14449  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
    14450  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
    14451  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
    14452 }
    14453 
    14454 VkResult VmaAllocator_T::AllocateMemoryOfType(
    14455  VkDeviceSize size,
    14456  VkDeviceSize alignment,
    14457  bool dedicatedAllocation,
    14458  VkBuffer dedicatedBuffer,
    14459  VkImage dedicatedImage,
    14460  const VmaAllocationCreateInfo& createInfo,
    14461  uint32_t memTypeIndex,
    14462  VmaSuballocationType suballocType,
    14463  size_t allocationCount,
    14464  VmaAllocation* pAllocations)
    14465 {
    14466  VMA_ASSERT(pAllocations != VMA_NULL);
    14467  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);
    14468 
    14469  VmaAllocationCreateInfo finalCreateInfo = createInfo;
    14470 
    14471  // If memory type is not HOST_VISIBLE, disable MAPPED.
    14472  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    14473  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    14474  {
    14475  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
    14476  }
    14477 
    14478  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
    14479  VMA_ASSERT(blockVector);
    14480 
    14481  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
    14482  bool preferDedicatedMemory =
    14483  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
    14484  dedicatedAllocation ||
    14485  // Heuristics: Allocate dedicated memory if requested size if greater than half of preferred block size.
    14486  size > preferredBlockSize / 2;
    14487 
    14488  if(preferDedicatedMemory &&
    14489  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
    14490  finalCreateInfo.pool == VK_NULL_HANDLE)
    14491  {
    14493  }
    14494 
    14495  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
    14496  {
    14497  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    14498  {
    14499  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14500  }
    14501  else
    14502  {
    14503  return AllocateDedicatedMemory(
    14504  size,
    14505  suballocType,
    14506  memTypeIndex,
    14507  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    14508  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    14509  finalCreateInfo.pUserData,
    14510  dedicatedBuffer,
    14511  dedicatedImage,
    14512  allocationCount,
    14513  pAllocations);
    14514  }
    14515  }
    14516  else
    14517  {
    14518  VkResult res = blockVector->Allocate(
    14519  m_CurrentFrameIndex.load(),
    14520  size,
    14521  alignment,
    14522  finalCreateInfo,
    14523  suballocType,
    14524  allocationCount,
    14525  pAllocations);
    14526  if(res == VK_SUCCESS)
    14527  {
    14528  return res;
    14529  }
    14530 
    14531  // 5. Try dedicated memory.
    14532  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    14533  {
    14534  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14535  }
    14536  else
    14537  {
    14538  res = AllocateDedicatedMemory(
    14539  size,
    14540  suballocType,
    14541  memTypeIndex,
    14542  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    14543  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    14544  finalCreateInfo.pUserData,
    14545  dedicatedBuffer,
    14546  dedicatedImage,
    14547  allocationCount,
    14548  pAllocations);
    14549  if(res == VK_SUCCESS)
    14550  {
    14551  // Succeeded: AllocateDedicatedMemory function already filld pMemory, nothing more to do here.
    14552  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
    14553  return VK_SUCCESS;
    14554  }
    14555  else
    14556  {
    14557  // Everything failed: Return error code.
    14558  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    14559  return res;
    14560  }
    14561  }
    14562  }
    14563 }
    14564 
// Allocates `allocationCount` separate VkDeviceMemory objects of `size`
// bytes each (optionally persistently mapped), registers them in
// m_pDedicatedAllocations[memTypeIndex] on success, and rolls back all
// partially created allocations on failure.
// Returns the first failing VkResult, or VK_SUCCESS.
VkResult VmaAllocator_T::AllocateDedicatedMemory(
    VkDeviceSize size,
    VmaSuballocationType suballocType,
    uint32_t memTypeIndex,
    bool map,
    bool isUserDataString,
    void* pUserData,
    VkBuffer dedicatedBuffer,
    VkImage dedicatedImage,
    size_t allocationCount,
    VmaAllocation* pAllocations)
{
    VMA_ASSERT(allocationCount > 0 && pAllocations);

    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    allocInfo.memoryTypeIndex = memTypeIndex;
    allocInfo.allocationSize = size;

#if VMA_DEDICATED_ALLOCATION
    // Chain VkMemoryDedicatedAllocateInfoKHR when the allocation is tied to a
    // specific buffer or image (at most one of the two may be set).
    VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
    if(m_UseKhrDedicatedAllocation)
    {
        if(dedicatedBuffer != VK_NULL_HANDLE)
        {
            VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
            dedicatedAllocInfo.buffer = dedicatedBuffer;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
        else if(dedicatedImage != VK_NULL_HANDLE)
        {
            dedicatedAllocInfo.image = dedicatedImage;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
    }
#endif // #if VMA_DEDICATED_ALLOCATION

    // Create allocations one page at a time; stop at the first failure.
    size_t allocIndex;
    VkResult res = VK_SUCCESS;
    for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    {
        res = AllocateDedicatedMemoryPage(
            size,
            suballocType,
            memTypeIndex,
            allocInfo,
            map,
            isUserDataString,
            pUserData,
            pAllocations + allocIndex);
        if(res != VK_SUCCESS)
        {
            break;
        }
    }

    if(res == VK_SUCCESS)
    {
        // Register them in m_pDedicatedAllocations.
        {
            VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
            AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
            VMA_ASSERT(pDedicatedAllocations);
            for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
            {
                VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, pAllocations[allocIndex]);
            }
        }

        VMA_DEBUG_LOG(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
    }
    else
    {
        // Free all already created allocations.
        while(allocIndex--)
        {
            VmaAllocation currAlloc = pAllocations[allocIndex];
            VkDeviceMemory hMemory = currAlloc->GetMemory();

            /*
            There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
            before vkFreeMemory.

            if(currAlloc->GetMappedData() != VMA_NULL)
            {
                (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
            }
            */

            FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);

            // Release the allocation object itself.
            currAlloc->SetUserData(this, VMA_NULL);
            currAlloc->Dtor();
            m_AllocationObjectAllocator.Free(currAlloc);
        }

        // Leave the output array in a defined (all-null) state on failure.
        memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
    }

    return res;
}
    14665 
// Creates a single dedicated allocation: allocates VkDeviceMemory, optionally
// maps it persistently, and wraps it in a new VmaAllocation object.
// Frees the device memory again if mapping fails.
VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
    VkDeviceSize size,
    VmaSuballocationType suballocType,
    uint32_t memTypeIndex,
    const VkMemoryAllocateInfo& allocInfo,
    bool map,
    bool isUserDataString,
    void* pUserData,
    VmaAllocation* pAllocation)
{
    VkDeviceMemory hMemory = VK_NULL_HANDLE;
    VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
    if(res < 0)
    {
        VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
        return res;
    }

    // Persistently map the whole range when requested (MAPPED_BIT).
    void* pMappedData = VMA_NULL;
    if(map)
    {
        res = (*m_VulkanFunctions.vkMapMemory)(
            m_hDevice,
            hMemory,
            0,
            VK_WHOLE_SIZE,
            0,
            &pMappedData);
        if(res < 0)
        {
            VMA_DEBUG_LOG(" vkMapMemory FAILED");
            // Roll back the device memory allocation before reporting failure.
            FreeVulkanMemory(memTypeIndex, size, hMemory);
            return res;
        }
    }

    *pAllocation = m_AllocationObjectAllocator.Allocate();
    (*pAllocation)->Ctor(m_CurrentFrameIndex.load(), isUserDataString);
    (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
    (*pAllocation)->SetUserData(this, pUserData);
    // Optionally fill new memory with a debug pattern to catch use of
    // uninitialized data.
    if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    {
        FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    }

    return VK_SUCCESS;
}
    14713 
// Queries memory requirements for a buffer. When VK_KHR_dedicated_allocation
// is enabled, uses vkGetBufferMemoryRequirements2KHR with a chained
// VkMemoryDedicatedRequirementsKHR to also report whether a dedicated
// allocation is required or preferred; otherwise falls back to the core
// query and reports both flags as false.
void VmaAllocator_T::GetBufferMemoryRequirements(
    VkBuffer hBuffer,
    VkMemoryRequirements& memReq,
    bool& requiresDedicatedAllocation,
    bool& prefersDedicatedAllocation) const
{
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
        memReqInfo.buffer = hBuffer;

        VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };

        // Chain the dedicated-requirements query onto the main query.
        VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
        memReq2.pNext = &memDedicatedReq;

        (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);

        memReq = memReq2.memoryRequirements;
        requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
        prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    }
    else
#endif // #if VMA_DEDICATED_ALLOCATION
    {
        (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
        requiresDedicatedAllocation = false;
        prefersDedicatedAllocation = false;
    }
}
    14745 
    14746 void VmaAllocator_T::GetImageMemoryRequirements(
    14747  VkImage hImage,
    14748  VkMemoryRequirements& memReq,
    14749  bool& requiresDedicatedAllocation,
    14750  bool& prefersDedicatedAllocation) const
    14751 {
    14752 #if VMA_DEDICATED_ALLOCATION
    14753  if(m_UseKhrDedicatedAllocation)
    14754  {
    14755  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
    14756  memReqInfo.image = hImage;
    14757 
    14758  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    14759 
    14760  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    14761  memReq2.pNext = &memDedicatedReq;
    14762 
    14763  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    14764 
    14765  memReq = memReq2.memoryRequirements;
    14766  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    14767  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    14768  }
    14769  else
    14770 #endif // #if VMA_DEDICATED_ALLOCATION
    14771  {
    14772  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
    14773  requiresDedicatedAllocation = false;
    14774  prefersDedicatedAllocation = false;
    14775  }
    14776 }
    14777 
    14778 VkResult VmaAllocator_T::AllocateMemory(
    14779  const VkMemoryRequirements& vkMemReq,
    14780  bool requiresDedicatedAllocation,
    14781  bool prefersDedicatedAllocation,
    14782  VkBuffer dedicatedBuffer,
    14783  VkImage dedicatedImage,
    14784  const VmaAllocationCreateInfo& createInfo,
    14785  VmaSuballocationType suballocType,
    14786  size_t allocationCount,
    14787  VmaAllocation* pAllocations)
    14788 {
    14789  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
    14790 
    14791  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
    14792 
    14793  if(vkMemReq.size == 0)
    14794  {
    14795  return VK_ERROR_VALIDATION_FAILED_EXT;
    14796  }
    14797  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
    14798  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    14799  {
    14800  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
    14801  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14802  }
    14803  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    14805  {
    14806  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
    14807  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14808  }
    14809  if(requiresDedicatedAllocation)
    14810  {
    14811  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    14812  {
    14813  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
    14814  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14815  }
    14816  if(createInfo.pool != VK_NULL_HANDLE)
    14817  {
    14818  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
    14819  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14820  }
    14821  }
    14822  if((createInfo.pool != VK_NULL_HANDLE) &&
    14823  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
    14824  {
    14825  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
    14826  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14827  }
    14828 
    14829  if(createInfo.pool != VK_NULL_HANDLE)
    14830  {
    14831  const VkDeviceSize alignmentForPool = VMA_MAX(
    14832  vkMemReq.alignment,
    14833  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
    14834 
    14835  VmaAllocationCreateInfo createInfoForPool = createInfo;
    14836  // If memory type is not HOST_VISIBLE, disable MAPPED.
    14837  if((createInfoForPool.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    14838  (m_MemProps.memoryTypes[createInfo.pool->m_BlockVector.GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    14839  {
    14840  createInfoForPool.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
    14841  }
    14842 
    14843  return createInfo.pool->m_BlockVector.Allocate(
    14844  m_CurrentFrameIndex.load(),
    14845  vkMemReq.size,
    14846  alignmentForPool,
    14847  createInfoForPool,
    14848  suballocType,
    14849  allocationCount,
    14850  pAllocations);
    14851  }
    14852  else
    14853  {
    14854  // Bit mask of memory Vulkan types acceptable for this allocation.
    14855  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
    14856  uint32_t memTypeIndex = UINT32_MAX;
    14857  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    14858  if(res == VK_SUCCESS)
    14859  {
    14860  VkDeviceSize alignmentForMemType = VMA_MAX(
    14861  vkMemReq.alignment,
    14862  GetMemoryTypeMinAlignment(memTypeIndex));
    14863 
    14864  res = AllocateMemoryOfType(
    14865  vkMemReq.size,
    14866  alignmentForMemType,
    14867  requiresDedicatedAllocation || prefersDedicatedAllocation,
    14868  dedicatedBuffer,
    14869  dedicatedImage,
    14870  createInfo,
    14871  memTypeIndex,
    14872  suballocType,
    14873  allocationCount,
    14874  pAllocations);
    14875  // Succeeded on first try.
    14876  if(res == VK_SUCCESS)
    14877  {
    14878  return res;
    14879  }
    14880  // Allocation from this memory type failed. Try other compatible memory types.
    14881  else
    14882  {
    14883  for(;;)
    14884  {
    14885  // Remove old memTypeIndex from list of possibilities.
    14886  memoryTypeBits &= ~(1u << memTypeIndex);
    14887  // Find alternative memTypeIndex.
    14888  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    14889  if(res == VK_SUCCESS)
    14890  {
    14891  alignmentForMemType = VMA_MAX(
    14892  vkMemReq.alignment,
    14893  GetMemoryTypeMinAlignment(memTypeIndex));
    14894 
    14895  res = AllocateMemoryOfType(
    14896  vkMemReq.size,
    14897  alignmentForMemType,
    14898  requiresDedicatedAllocation || prefersDedicatedAllocation,
    14899  dedicatedBuffer,
    14900  dedicatedImage,
    14901  createInfo,
    14902  memTypeIndex,
    14903  suballocType,
    14904  allocationCount,
    14905  pAllocations);
    14906  // Allocation from this alternative memory type succeeded.
    14907  if(res == VK_SUCCESS)
    14908  {
    14909  return res;
    14910  }
    14911  // else: Allocation from this memory type failed. Try next one - next loop iteration.
    14912  }
    14913  // No other matching memory type index could be found.
    14914  else
    14915  {
    14916  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
    14917  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14918  }
    14919  }
    14920  }
    14921  }
    14922  // Can't find any single memory type maching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
    14923  else
    14924  return res;
    14925  }
    14926 }
    14927 
    14928 void VmaAllocator_T::FreeMemory(
    14929  size_t allocationCount,
    14930  const VmaAllocation* pAllocations)
    14931 {
    14932  VMA_ASSERT(pAllocations);
    14933 
    14934  for(size_t allocIndex = allocationCount; allocIndex--; )
    14935  {
    14936  VmaAllocation allocation = pAllocations[allocIndex];
    14937 
    14938  if(allocation != VK_NULL_HANDLE)
    14939  {
    14940  if(TouchAllocation(allocation))
    14941  {
    14942  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    14943  {
    14944  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
    14945  }
    14946 
    14947  switch(allocation->GetType())
    14948  {
    14949  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    14950  {
    14951  VmaBlockVector* pBlockVector = VMA_NULL;
    14952  VmaPool hPool = allocation->GetBlock()->GetParentPool();
    14953  if(hPool != VK_NULL_HANDLE)
    14954  {
    14955  pBlockVector = &hPool->m_BlockVector;
    14956  }
    14957  else
    14958  {
    14959  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    14960  pBlockVector = m_pBlockVectors[memTypeIndex];
    14961  }
    14962  pBlockVector->Free(allocation);
    14963  }
    14964  break;
    14965  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    14966  FreeDedicatedMemory(allocation);
    14967  break;
    14968  default:
    14969  VMA_ASSERT(0);
    14970  }
    14971  }
    14972 
    14973  allocation->SetUserData(this, VMA_NULL);
    14974  allocation->Dtor();
    14975  m_AllocationObjectAllocator.Free(allocation);
    14976  }
    14977  }
    14978 }
    14979 
    14980 VkResult VmaAllocator_T::ResizeAllocation(
    14981  const VmaAllocation alloc,
    14982  VkDeviceSize newSize)
    14983 {
    14984  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
    14985  {
    14986  return VK_ERROR_VALIDATION_FAILED_EXT;
    14987  }
    14988  if(newSize == alloc->GetSize())
    14989  {
    14990  return VK_SUCCESS;
    14991  }
    14992 
    14993  switch(alloc->GetType())
    14994  {
    14995  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    14996  return VK_ERROR_FEATURE_NOT_PRESENT;
    14997  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    14998  if(alloc->GetBlock()->m_pMetadata->ResizeAllocation(alloc, newSize))
    14999  {
    15000  alloc->ChangeSize(newSize);
    15001  VMA_HEAVY_ASSERT(alloc->GetBlock()->m_pMetadata->Validate());
    15002  return VK_SUCCESS;
    15003  }
    15004  else
    15005  {
    15006  return VK_ERROR_OUT_OF_POOL_MEMORY;
    15007  }
    15008  default:
    15009  VMA_ASSERT(0);
    15010  return VK_ERROR_VALIDATION_FAILED_EXT;
    15011  }
    15012 }
    15013 
// Aggregates statistics for the whole allocator into *pStats: default block
// vectors, custom pools, and dedicated allocations, bucketed per memory type
// and per memory heap, plus a grand total, then derives averages.
void VmaAllocator_T::CalculateStats(VmaStats* pStats)
{
    // Initialize all buckets to empty.
    InitStatInfo(pStats->total);
    for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
        InitStatInfo(pStats->memoryType[i]);
    for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
        InitStatInfo(pStats->memoryHeap[i]);

    // Process default pools (one block vector per memory type).
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
        VMA_ASSERT(pBlockVector);
        pBlockVector->AddStats(pStats);
    }

    // Process custom pools, under the pools read lock.
    {
        VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
        for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
        {
            m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
        }
    }

    // Process dedicated allocations; each memory type has its own list and mutex.
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
        VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocVector);
        for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
        {
            VmaStatInfo allocationStatInfo;
            (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
            // Fold each dedicated allocation into total, per-type, and per-heap buckets.
            VmaAddStatInfo(pStats->total, allocationStatInfo);
            VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
            VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
        }
    }

    // Postprocess: derive per-bucket summary values from the accumulated sums.
    VmaPostprocessCalcStatInfo(pStats->total);
    for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
        VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
    for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
        VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
}
    15064 
    15065 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
    15066 
    15067 VkResult VmaAllocator_T::DefragmentationBegin(
    15068  const VmaDefragmentationInfo2& info,
    15069  VmaDefragmentationStats* pStats,
    15070  VmaDefragmentationContext* pContext)
    15071 {
    15072  if(info.pAllocationsChanged != VMA_NULL)
    15073  {
    15074  memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32));
    15075  }
    15076 
    15077  *pContext = vma_new(this, VmaDefragmentationContext_T)(
    15078  this, m_CurrentFrameIndex.load(), info.flags, pStats);
    15079 
    15080  (*pContext)->AddPools(info.poolCount, info.pPools);
    15081  (*pContext)->AddAllocations(
    15083 
    15084  VkResult res = (*pContext)->Defragment(
    15087  info.commandBuffer, pStats);
    15088 
    15089  if(res != VK_NOT_READY)
    15090  {
    15091  vma_delete(this, *pContext);
    15092  *pContext = VMA_NULL;
    15093  }
    15094 
    15095  return res;
    15096 }
    15097 
// Finishes a defragmentation operation started by DefragmentationBegin():
// destroys the context object. Always succeeds.
VkResult VmaAllocator_T::DefragmentationEnd(
    VmaDefragmentationContext context)
{
    vma_delete(this, context);
    return VK_SUCCESS;
}
    15104 
// Fills *pAllocationInfo with the allocation's current parameters and marks it
// as used in the current frame. For lost-capable allocations this also races
// to advance the last-use frame index; a lost allocation is reported with
// null memory, zero offset and UINT32_MAX memory type.
void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
{
    if(hAllocation->CanBecomeLost())
    {
        /*
        Warning: This is a carefully designed algorithm.
        Do not modify unless you really know what you're doing :)
        */
        const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        // CAS loop: terminates once the index is observed as LOST or equal to
        // the current frame; otherwise tries to advance it and retries.
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                // Allocation is lost: report size/userData but no memory.
                pAllocationInfo->memoryType = UINT32_MAX;
                pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
                pAllocationInfo->offset = 0;
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                // Alive and already touched this frame: report real parameters.
                // pMappedData is always null here - lost-capable allocations
                // cannot stay persistently mapped.
                pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
                pAllocationInfo->deviceMemory = hAllocation->GetMemory();
                pAllocationInfo->offset = hAllocation->GetOffset();
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else // Last use time earlier than current time.
            {
                // Try to bump last-use index to the current frame; on failure
                // presumably localLastUseFrameIndex is refreshed by the
                // compare-exchange and the loop re-evaluates - confirm against
                // CompareExchangeLastUseFrameIndex's definition.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // Statistics-only bookkeeping: keep the last-use frame index current
        // even for allocations that can never become lost.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        // Non-lost-capable allocation: report full parameters including any
        // persistently mapped pointer.
        pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
        pAllocationInfo->deviceMemory = hAllocation->GetMemory();
        pAllocationInfo->offset = hAllocation->GetOffset();
        pAllocationInfo->size = hAllocation->GetSize();
        pAllocationInfo->pMappedData = hAllocation->GetMappedData();
        pAllocationInfo->pUserData = hAllocation->GetUserData();
    }
}
    15176 
// Marks the allocation as used in the current frame and reports whether it is
// still alive. Returns false only for lost-capable allocations that are
// already lost; always true otherwise.
bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
{
    // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
    if(hAllocation->CanBecomeLost())
    {
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        // CAS loop: terminates once the index is observed as LOST or equal to
        // the current frame; otherwise tries to advance it and retries.
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                return false;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                return true;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // Statistics-only bookkeeping: keep the last-use frame index current
        // even for allocations that can never become lost.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        return true;
    }
}
    15228 
    15229 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
    15230 {
    15231  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
    15232 
    15233  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
    15234 
    15235  if(newCreateInfo.maxBlockCount == 0)
    15236  {
    15237  newCreateInfo.maxBlockCount = SIZE_MAX;
    15238  }
    15239  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
    15240  {
    15241  return VK_ERROR_INITIALIZATION_FAILED;
    15242  }
    15243 
    15244  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
    15245 
    15246  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
    15247 
    15248  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
    15249  if(res != VK_SUCCESS)
    15250  {
    15251  vma_delete(this, *pPool);
    15252  *pPool = VMA_NULL;
    15253  return res;
    15254  }
    15255 
    15256  // Add to m_Pools.
    15257  {
    15258  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
    15259  (*pPool)->SetId(m_NextPoolId++);
    15260  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
    15261  }
    15262 
    15263  return VK_SUCCESS;
    15264 }
    15265 
    15266 void VmaAllocator_T::DestroyPool(VmaPool pool)
    15267 {
    15268  // Remove from m_Pools.
    15269  {
    15270  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
    15271  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
    15272  VMA_ASSERT(success && "Pool not found in Allocator.");
    15273  }
    15274 
    15275  vma_delete(this, pool);
    15276 }
    15277 
// Retrieves statistics of a single custom pool by delegating to its block vector.
void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
{
    pool->m_BlockVector.GetPoolStats(pPoolStats);
}
    15282 
// Atomically publishes the application's current frame index, which the
// lost-allocation logic reads via m_CurrentFrameIndex.load().
void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
{
    m_CurrentFrameIndex.store(frameIndex);
}
    15287 
// Marks eligible allocations in the given pool as lost, judged against the
// current frame index. The number of allocations actually lost is returned
// through pLostAllocationCount (behavior when null is up to the block vector).
void VmaAllocator_T::MakePoolAllocationsLost(
    VmaPool hPool,
    size_t* pLostAllocationCount)
{
    hPool->m_BlockVector.MakePoolAllocationsLost(
        m_CurrentFrameIndex.load(),
        pLostAllocationCount);
}
    15296 
// Runs corruption detection over all blocks of the given custom pool.
VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
{
    return hPool->m_BlockVector.CheckCorruption();
}
    15301 
    15302 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
    15303 {
    15304  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
    15305 
    15306  // Process default pools.
    15307  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    15308  {
    15309  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
    15310  {
    15311  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    15312  VMA_ASSERT(pBlockVector);
    15313  VkResult localRes = pBlockVector->CheckCorruption();
    15314  switch(localRes)
    15315  {
    15316  case VK_ERROR_FEATURE_NOT_PRESENT:
    15317  break;
    15318  case VK_SUCCESS:
    15319  finalRes = VK_SUCCESS;
    15320  break;
    15321  default:
    15322  return localRes;
    15323  }
    15324  }
    15325  }
    15326 
    15327  // Process custom pools.
    15328  {
    15329  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
    15330  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    15331  {
    15332  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
    15333  {
    15334  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
    15335  switch(localRes)
    15336  {
    15337  case VK_ERROR_FEATURE_NOT_PRESENT:
    15338  break;
    15339  case VK_SUCCESS:
    15340  finalRes = VK_SUCCESS;
    15341  break;
    15342  default:
    15343  return localRes;
    15344  }
    15345  }
    15346  }
    15347  }
    15348 
    15349  return finalRes;
    15350 }
    15351 
// Creates a dummy allocation object that is permanently in the "lost" state.
// It never owns device memory and can be used as a placeholder handle.
void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
{
    *pAllocation = m_AllocationObjectAllocator.Allocate();
    // Second Ctor argument is isUserDataString (matches the call in
    // AllocateDedicatedMemoryPage above) - lost allocations use none.
    (*pAllocation)->Ctor(VMA_FRAME_INDEX_LOST, false);
    (*pAllocation)->InitLost();
}
    15358 
    15359 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
    15360 {
    15361  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
    15362 
    15363  VkResult res;
    15364  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    15365  {
    15366  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    15367  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
    15368  {
    15369  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    15370  if(res == VK_SUCCESS)
    15371  {
    15372  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
    15373  }
    15374  }
    15375  else
    15376  {
    15377  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
    15378  }
    15379  }
    15380  else
    15381  {
    15382  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    15383  }
    15384 
    15385  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
    15386  {
    15387  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
    15388  }
    15389 
    15390  return res;
    15391 }
    15392 
// Calls vkFreeMemory and updates bookkeeping: the user's pfnFree callback is
// notified before the memory is released, and the freed bytes are returned to
// the heap budget if a size limit is configured for that heap.
void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
{
    // Notify the user's callback while the handle is still valid.
    if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
    {
        (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
    }

    (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());

    // Return the freed bytes to the heap budget, if one is enforced.
    const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
    if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    {
        VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
        m_HeapSizeLimit[heapIndex] += size;
    }
}
    15409 
    15410 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
    15411 {
    15412  if(hAllocation->CanBecomeLost())
    15413  {
    15414  return VK_ERROR_MEMORY_MAP_FAILED;
    15415  }
    15416 
    15417  switch(hAllocation->GetType())
    15418  {
    15419  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    15420  {
    15421  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    15422  char *pBytes = VMA_NULL;
    15423  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
    15424  if(res == VK_SUCCESS)
    15425  {
    15426  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
    15427  hAllocation->BlockAllocMap();
    15428  }
    15429  return res;
    15430  }
    15431  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    15432  return hAllocation->DedicatedAllocMap(this, ppData);
    15433  default:
    15434  VMA_ASSERT(0);
    15435  return VK_ERROR_MEMORY_MAP_FAILED;
    15436  }
    15437 }
    15438 
// Unmaps memory previously mapped with Map(). For a block allocation this
// decrements the allocation's map counter first, then the block's ref-counted
// mapping; for a dedicated allocation it unmaps its own VkDeviceMemory.
void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
{
    switch(hAllocation->GetType())
    {
    case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
        {
            VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
            hAllocation->BlockAllocUnmap();
            pBlock->Unmap(this, 1);
        }
        break;
    case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
        hAllocation->DedicatedAllocUnmap(this);
        break;
    default:
        VMA_ASSERT(0);
    }
}
    15457 
    15458 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
    15459 {
    15460  VkResult res = VK_SUCCESS;
    15461  switch(hAllocation->GetType())
    15462  {
    15463  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    15464  res = GetVulkanFunctions().vkBindBufferMemory(
    15465  m_hDevice,
    15466  hBuffer,
    15467  hAllocation->GetMemory(),
    15468  0); //memoryOffset
    15469  break;
    15470  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    15471  {
    15472  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    15473  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
    15474  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
    15475  break;
    15476  }
    15477  default:
    15478  VMA_ASSERT(0);
    15479  }
    15480  return res;
    15481 }
    15482 
    15483 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
    15484 {
    15485  VkResult res = VK_SUCCESS;
    15486  switch(hAllocation->GetType())
    15487  {
    15488  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    15489  res = GetVulkanFunctions().vkBindImageMemory(
    15490  m_hDevice,
    15491  hImage,
    15492  hAllocation->GetMemory(),
    15493  0); //memoryOffset
    15494  break;
    15495  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    15496  {
    15497  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    15498  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
    15499  res = pBlock->BindImageMemory(this, hAllocation, hImage);
    15500  break;
    15501  }
    15502  default:
    15503  VMA_ASSERT(0);
    15504  }
    15505  return res;
    15506 }
    15507 
// Flushes or invalidates a range of the allocation's memory, but only when
// the memory type is non-coherent (coherent memory needs neither). offset and
// size are relative to the allocation; size may be VK_WHOLE_SIZE. The range is
// expanded to nonCoherentAtomSize boundaries as Vulkan requires for
// VkMappedMemoryRange, and clamped to the allocation/block bounds.
void VmaAllocator_T::FlushOrInvalidateAllocation(
    VmaAllocation hAllocation,
    VkDeviceSize offset, VkDeviceSize size,
    VMA_CACHE_OPERATION op)
{
    const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
    if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
    {
        const VkDeviceSize allocationSize = hAllocation->GetSize();
        VMA_ASSERT(offset <= allocationSize);

        const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;

        VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
        memRange.memory = hAllocation->GetMemory();

        switch(hAllocation->GetType())
        {
        case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
            // Align start down to an atom boundary; extend the end up to an
            // atom boundary but never past the end of the allocation.
            memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                memRange.size = allocationSize - memRange.offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
                memRange.size = VMA_MIN(
                    VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
                    allocationSize - memRange.offset);
            }
            break;

        case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
            {
                // 1. Still within this allocation.
                memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
                if(size == VK_WHOLE_SIZE)
                {
                    size = allocationSize - offset;
                }
                else
                {
                    VMA_ASSERT(offset + size <= allocationSize);
                }
                memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);

                // 2. Adjust to whole block: rebase the range by the
                // suballocation's offset inside the block and clamp to the
                // block's total size. Suballocation offsets are expected to be
                // atom-aligned, so the rebased start stays aligned.
                const VkDeviceSize allocationOffset = hAllocation->GetOffset();
                VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
                const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
                memRange.offset += allocationOffset;
                memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);

                break;
            }

        default:
            VMA_ASSERT(0);
        }

        switch(op)
        {
        case VMA_CACHE_FLUSH:
            (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        case VMA_CACHE_INVALIDATE:
            (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        default:
            VMA_ASSERT(0);
        }
    }
    // else: Just ignore this call.
}
    15583 
// Releases a dedicated allocation's memory: unregisters it from the
// per-memory-type list of dedicated allocations (under that type's write
// lock), then frees its VkDeviceMemory via FreeVulkanMemory().
void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
{
    VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);

    const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    {
        VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocations);
        bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
        VMA_ASSERT(success);
    }

    VkDeviceMemory hMemory = allocation->GetMemory();

    /*
    There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
    before vkFreeMemory.

    if(allocation->GetMappedData() != VMA_NULL)
    {
        (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    }
    */

    FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);

    VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
}
    15613 
    15614 uint32_t VmaAllocator_T::CalculateGpuDefragmentationMemoryTypeBits() const
    15615 {
    15616  VkBufferCreateInfo dummyBufCreateInfo;
    15617  VmaFillGpuDefragmentationBufferCreateInfo(dummyBufCreateInfo);
    15618 
    15619  uint32_t memoryTypeBits = 0;
    15620 
    15621  // Create buffer.
    15622  VkBuffer buf = VK_NULL_HANDLE;
    15623  VkResult res = (*GetVulkanFunctions().vkCreateBuffer)(
    15624  m_hDevice, &dummyBufCreateInfo, GetAllocationCallbacks(), &buf);
    15625  if(res == VK_SUCCESS)
    15626  {
    15627  // Query for supported memory types.
    15628  VkMemoryRequirements memReq;
    15629  (*GetVulkanFunctions().vkGetBufferMemoryRequirements)(m_hDevice, buf, &memReq);
    15630  memoryTypeBits = memReq.memoryTypeBits;
    15631 
    15632  // Destroy buffer.
    15633  (*GetVulkanFunctions().vkDestroyBuffer)(m_hDevice, buf, GetAllocationCallbacks());
    15634  }
    15635 
    15636  return memoryTypeBits;
    15637 }
    15638 
    15639 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
    15640 {
    15641  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
    15642  !hAllocation->CanBecomeLost() &&
    15643  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    15644  {
    15645  void* pData = VMA_NULL;
    15646  VkResult res = Map(hAllocation, &pData);
    15647  if(res == VK_SUCCESS)
    15648  {
    15649  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
    15650  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
    15651  Unmap(hAllocation);
    15652  }
    15653  else
    15654  {
    15655  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
    15656  }
    15657  }
    15658 }
    15659 
    15660 uint32_t VmaAllocator_T::GetGpuDefragmentationMemoryTypeBits()
    15661 {
    15662  uint32_t memoryTypeBits = m_GpuDefragmentationMemoryTypeBits.load();
    15663  if(memoryTypeBits == UINT32_MAX)
    15664  {
    15665  memoryTypeBits = CalculateGpuDefragmentationMemoryTypeBits();
    15666  m_GpuDefragmentationMemoryTypeBits.store(memoryTypeBits);
    15667  }
    15668  return memoryTypeBits;
    15669 }
    15670 
    15671 #if VMA_STATS_STRING_ENABLED
    15672 
void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
{
    // Appends three optional top-level JSON sections to an already-open object:
    // "DedicatedAllocations", "DefaultPools", and "Pools". Each section is
    // emitted only if it has content, so the opening WriteString/BeginObject
    // pair is issued lazily on first non-empty entry.

    // Section 1: dedicated allocations, grouped by memory type index.
    bool dedicatedAllocationsStarted = false;
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocVector);
        if(pDedicatedAllocVector->empty() == false)
        {
            if(dedicatedAllocationsStarted == false)
            {
                // Open the section on the first memory type that has entries.
                dedicatedAllocationsStarted = true;
                json.WriteString("DedicatedAllocations");
                json.BeginObject();
            }

            // Key is "Type <index>", value is an array of allocation objects.
            json.BeginString("Type ");
            json.ContinueString(memTypeIndex);
            json.EndString();

            json.BeginArray();

            for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
            {
                json.BeginObject(true);
                const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
                hAlloc->PrintParameters(json);
                json.EndObject();
            }

            json.EndArray();
        }
    }
    if(dedicatedAllocationsStarted)
    {
        json.EndObject();
    }

    // Section 2: default (non-custom-pool) block vectors, one per memory type.
    {
        bool allocationsStarted = false;
        for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
        {
            if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
            {
                if(allocationsStarted == false)
                {
                    allocationsStarted = true;
                    json.WriteString("DefaultPools");
                    json.BeginObject();
                }

                json.BeginString("Type ");
                json.ContinueString(memTypeIndex);
                json.EndString();

                m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
            }
        }
        if(allocationsStarted)
        {
            json.EndObject();
        }
    }

    // Section 3: custom pools, keyed by pool id, under the pools mutex.
    {
        VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
        const size_t poolCount = m_Pools.size();
        if(poolCount > 0)
        {
            json.WriteString("Pools");
            json.BeginObject();
            for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
            {
                json.BeginString();
                json.ContinueString(m_Pools[poolIndex]->GetId());
                json.EndString();

                m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
            }
            json.EndObject();
        }
    }
}
    15758 
    15759 #endif // #if VMA_STATS_STRING_ENABLED
    15760 
    15762 // Public interface
    15763 
    15764 VkResult vmaCreateAllocator(
    15765  const VmaAllocatorCreateInfo* pCreateInfo,
    15766  VmaAllocator* pAllocator)
    15767 {
    15768  VMA_ASSERT(pCreateInfo && pAllocator);
    15769  VMA_DEBUG_LOG("vmaCreateAllocator");
    15770  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
    15771  return (*pAllocator)->Init(pCreateInfo);
    15772 }
    15773 
    15774 void vmaDestroyAllocator(
    15775  VmaAllocator allocator)
    15776 {
    15777  if(allocator != VK_NULL_HANDLE)
    15778  {
    15779  VMA_DEBUG_LOG("vmaDestroyAllocator");
    15780  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
    15781  vma_delete(&allocationCallbacks, allocator);
    15782  }
    15783 }
    15784 
    15786  VmaAllocator allocator,
    15787  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
    15788 {
    15789  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
    15790  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
    15791 }
    15792 
    15794  VmaAllocator allocator,
    15795  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
    15796 {
    15797  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
    15798  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
    15799 }
    15800 
    15802  VmaAllocator allocator,
    15803  uint32_t memoryTypeIndex,
    15804  VkMemoryPropertyFlags* pFlags)
    15805 {
    15806  VMA_ASSERT(allocator && pFlags);
    15807  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
    15808  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
    15809 }
    15810 
    15812  VmaAllocator allocator,
    15813  uint32_t frameIndex)
    15814 {
    15815  VMA_ASSERT(allocator);
    15816  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
    15817 
    15818  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    15819 
    15820  allocator->SetCurrentFrameIndex(frameIndex);
    15821 }
    15822 
    15823 void vmaCalculateStats(
    15824  VmaAllocator allocator,
    15825  VmaStats* pStats)
    15826 {
    15827  VMA_ASSERT(allocator && pStats);
    15828  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    15829  allocator->CalculateStats(pStats);
    15830 }
    15831 
    15832 #if VMA_STATS_STRING_ENABLED
    15833 
void vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap)
{
    // Builds a JSON statistics string describing all heaps and memory types.
    // The returned string is allocated with the allocator's CPU callbacks and
    // must be released with vmaFreeStatsString().
    VMA_ASSERT(allocator && ppStatsString);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VmaStringBuilder sb(allocator);
    {
        VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
        json.BeginObject();

        VmaStats stats;
        allocator->CalculateStats(&stats);

        // Aggregate statistics across everything.
        json.WriteString("Total");
        VmaPrintStatInfo(json, stats.total);

        // One object per memory heap...
        for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
        {
            json.BeginString("Heap ");
            json.ContinueString(heapIndex);
            json.EndString();
            json.BeginObject();

            json.WriteString("Size");
            json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);

            json.WriteString("Flags");
            json.BeginArray(true);
            if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
            {
                json.WriteString("DEVICE_LOCAL");
            }
            json.EndArray();

            // Per-heap stats are emitted only when the heap has any blocks.
            if(stats.memoryHeap[heapIndex].blockCount > 0)
            {
                json.WriteString("Stats");
                VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
            }

            // ...containing one nested object per memory type of that heap.
            for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
            {
                if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
                {
                    json.BeginString("Type ");
                    json.ContinueString(typeIndex);
                    json.EndString();

                    json.BeginObject();

                    // Decode the memory property flags into readable names.
                    json.WriteString("Flags");
                    json.BeginArray(true);
                    VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
                    if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
                    {
                        json.WriteString("DEVICE_LOCAL");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
                    {
                        json.WriteString("HOST_VISIBLE");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
                    {
                        json.WriteString("HOST_COHERENT");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
                    {
                        json.WriteString("HOST_CACHED");
                    }
                    if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
                    {
                        json.WriteString("LAZILY_ALLOCATED");
                    }
                    json.EndArray();

                    if(stats.memoryType[typeIndex].blockCount > 0)
                    {
                        json.WriteString("Stats");
                        VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
                    }

                    json.EndObject();
                }
            }

            json.EndObject();
        }
        if(detailedMap == VK_TRUE)
        {
            // Optionally append the full per-allocation map.
            allocator->PrintDetailedMap(json);
        }

        json.EndObject();
    }

    // Copy the builder's contents into a null-terminated heap string owned by
    // the caller (len bytes + terminator).
    const size_t len = sb.GetLength();
    char* const pChars = vma_new_array(allocator, char, len + 1);
    if(len > 0)
    {
        memcpy(pChars, sb.GetData(), len);
    }
    pChars[len] = '\0';
    *ppStatsString = pChars;
}
    15941 
    15942 void vmaFreeStatsString(
    15943  VmaAllocator allocator,
    15944  char* pStatsString)
    15945 {
    15946  if(pStatsString != VMA_NULL)
    15947  {
    15948  VMA_ASSERT(allocator);
    15949  size_t len = strlen(pStatsString);
    15950  vma_delete_array(allocator, pStatsString, len + 1);
    15951  }
    15952 }
    15953 
    15954 #endif // #if VMA_STATS_STRING_ENABLED
    15955 
    15956 /*
    15957 This function is not protected by any mutex because it just reads immutable data.
    15958 */
    15959 VkResult vmaFindMemoryTypeIndex(
    15960  VmaAllocator allocator,
    15961  uint32_t memoryTypeBits,
    15962  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    15963  uint32_t* pMemoryTypeIndex)
    15964 {
    15965  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    15966  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    15967  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    15968 
    15969  if(pAllocationCreateInfo->memoryTypeBits != 0)
    15970  {
    15971  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
    15972  }
    15973 
    15974  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
    15975  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
    15976 
    15977  // Convert usage to requiredFlags and preferredFlags.
    15978  switch(pAllocationCreateInfo->usage)
    15979  {
    15981  break;
    15983  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    15984  {
    15985  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    15986  }
    15987  break;
    15989  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    15990  break;
    15992  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    15993  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    15994  {
    15995  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    15996  }
    15997  break;
    15999  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    16000  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
    16001  break;
    16002  default:
    16003  break;
    16004  }
    16005 
    16006  *pMemoryTypeIndex = UINT32_MAX;
    16007  uint32_t minCost = UINT32_MAX;
    16008  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
    16009  memTypeIndex < allocator->GetMemoryTypeCount();
    16010  ++memTypeIndex, memTypeBit <<= 1)
    16011  {
    16012  // This memory type is acceptable according to memoryTypeBits bitmask.
    16013  if((memTypeBit & memoryTypeBits) != 0)
    16014  {
    16015  const VkMemoryPropertyFlags currFlags =
    16016  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
    16017  // This memory type contains requiredFlags.
    16018  if((requiredFlags & ~currFlags) == 0)
    16019  {
    16020  // Calculate cost as number of bits from preferredFlags not present in this memory type.
    16021  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
    16022  // Remember memory type with lowest cost.
    16023  if(currCost < minCost)
    16024  {
    16025  *pMemoryTypeIndex = memTypeIndex;
    16026  if(currCost == 0)
    16027  {
    16028  return VK_SUCCESS;
    16029  }
    16030  minCost = currCost;
    16031  }
    16032  }
    16033  }
    16034  }
    16035  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
    16036 }
    16037 
    16039  VmaAllocator allocator,
    16040  const VkBufferCreateInfo* pBufferCreateInfo,
    16041  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    16042  uint32_t* pMemoryTypeIndex)
    16043 {
    16044  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    16045  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
    16046  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    16047  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    16048 
    16049  const VkDevice hDev = allocator->m_hDevice;
    16050  VkBuffer hBuffer = VK_NULL_HANDLE;
    16051  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
    16052  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
    16053  if(res == VK_SUCCESS)
    16054  {
    16055  VkMemoryRequirements memReq = {};
    16056  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
    16057  hDev, hBuffer, &memReq);
    16058 
    16059  res = vmaFindMemoryTypeIndex(
    16060  allocator,
    16061  memReq.memoryTypeBits,
    16062  pAllocationCreateInfo,
    16063  pMemoryTypeIndex);
    16064 
    16065  allocator->GetVulkanFunctions().vkDestroyBuffer(
    16066  hDev, hBuffer, allocator->GetAllocationCallbacks());
    16067  }
    16068  return res;
    16069 }
    16070 
    16072  VmaAllocator allocator,
    16073  const VkImageCreateInfo* pImageCreateInfo,
    16074  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    16075  uint32_t* pMemoryTypeIndex)
    16076 {
    16077  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    16078  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
    16079  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    16080  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    16081 
    16082  const VkDevice hDev = allocator->m_hDevice;
    16083  VkImage hImage = VK_NULL_HANDLE;
    16084  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
    16085  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
    16086  if(res == VK_SUCCESS)
    16087  {
    16088  VkMemoryRequirements memReq = {};
    16089  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
    16090  hDev, hImage, &memReq);
    16091 
    16092  res = vmaFindMemoryTypeIndex(
    16093  allocator,
    16094  memReq.memoryTypeBits,
    16095  pAllocationCreateInfo,
    16096  pMemoryTypeIndex);
    16097 
    16098  allocator->GetVulkanFunctions().vkDestroyImage(
    16099  hDev, hImage, allocator->GetAllocationCallbacks());
    16100  }
    16101  return res;
    16102 }
    16103 
    16104 VkResult vmaCreatePool(
    16105  VmaAllocator allocator,
    16106  const VmaPoolCreateInfo* pCreateInfo,
    16107  VmaPool* pPool)
    16108 {
    16109  VMA_ASSERT(allocator && pCreateInfo && pPool);
    16110 
    16111  VMA_DEBUG_LOG("vmaCreatePool");
    16112 
    16113  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16114 
    16115  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
    16116 
    16117 #if VMA_RECORDING_ENABLED
    16118  if(allocator->GetRecorder() != VMA_NULL)
    16119  {
    16120  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
    16121  }
    16122 #endif
    16123 
    16124  return res;
    16125 }
    16126 
    16127 void vmaDestroyPool(
    16128  VmaAllocator allocator,
    16129  VmaPool pool)
    16130 {
    16131  VMA_ASSERT(allocator);
    16132 
    16133  if(pool == VK_NULL_HANDLE)
    16134  {
    16135  return;
    16136  }
    16137 
    16138  VMA_DEBUG_LOG("vmaDestroyPool");
    16139 
    16140  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16141 
    16142 #if VMA_RECORDING_ENABLED
    16143  if(allocator->GetRecorder() != VMA_NULL)
    16144  {
    16145  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
    16146  }
    16147 #endif
    16148 
    16149  allocator->DestroyPool(pool);
    16150 }
    16151 
    16152 void vmaGetPoolStats(
    16153  VmaAllocator allocator,
    16154  VmaPool pool,
    16155  VmaPoolStats* pPoolStats)
    16156 {
    16157  VMA_ASSERT(allocator && pool && pPoolStats);
    16158 
    16159  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16160 
    16161  allocator->GetPoolStats(pool, pPoolStats);
    16162 }
    16163 
    16165  VmaAllocator allocator,
    16166  VmaPool pool,
    16167  size_t* pLostAllocationCount)
    16168 {
    16169  VMA_ASSERT(allocator && pool);
    16170 
    16171  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16172 
    16173 #if VMA_RECORDING_ENABLED
    16174  if(allocator->GetRecorder() != VMA_NULL)
    16175  {
    16176  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
    16177  }
    16178 #endif
    16179 
    16180  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
    16181 }
    16182 
    16183 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
    16184 {
    16185  VMA_ASSERT(allocator && pool);
    16186 
    16187  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16188 
    16189  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
    16190 
    16191  return allocator->CheckPoolCorruption(pool);
    16192 }
    16193 
    16194 VkResult vmaAllocateMemory(
    16195  VmaAllocator allocator,
    16196  const VkMemoryRequirements* pVkMemoryRequirements,
    16197  const VmaAllocationCreateInfo* pCreateInfo,
    16198  VmaAllocation* pAllocation,
    16199  VmaAllocationInfo* pAllocationInfo)
    16200 {
    16201  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
    16202 
    16203  VMA_DEBUG_LOG("vmaAllocateMemory");
    16204 
    16205  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16206 
    16207  VkResult result = allocator->AllocateMemory(
    16208  *pVkMemoryRequirements,
    16209  false, // requiresDedicatedAllocation
    16210  false, // prefersDedicatedAllocation
    16211  VK_NULL_HANDLE, // dedicatedBuffer
    16212  VK_NULL_HANDLE, // dedicatedImage
    16213  *pCreateInfo,
    16214  VMA_SUBALLOCATION_TYPE_UNKNOWN,
    16215  1, // allocationCount
    16216  pAllocation);
    16217 
    16218 #if VMA_RECORDING_ENABLED
    16219  if(allocator->GetRecorder() != VMA_NULL)
    16220  {
    16221  allocator->GetRecorder()->RecordAllocateMemory(
    16222  allocator->GetCurrentFrameIndex(),
    16223  *pVkMemoryRequirements,
    16224  *pCreateInfo,
    16225  *pAllocation);
    16226  }
    16227 #endif
    16228 
    16229  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
    16230  {
    16231  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    16232  }
    16233 
    16234  return result;
    16235 }
    16236 
    16237 VkResult vmaAllocateMemoryPages(
    16238  VmaAllocator allocator,
    16239  const VkMemoryRequirements* pVkMemoryRequirements,
    16240  const VmaAllocationCreateInfo* pCreateInfo,
    16241  size_t allocationCount,
    16242  VmaAllocation* pAllocations,
    16243  VmaAllocationInfo* pAllocationInfo)
    16244 {
    16245  if(allocationCount == 0)
    16246  {
    16247  return VK_SUCCESS;
    16248  }
    16249 
    16250  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);
    16251 
    16252  VMA_DEBUG_LOG("vmaAllocateMemoryPages");
    16253 
    16254  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16255 
    16256  VkResult result = allocator->AllocateMemory(
    16257  *pVkMemoryRequirements,
    16258  false, // requiresDedicatedAllocation
    16259  false, // prefersDedicatedAllocation
    16260  VK_NULL_HANDLE, // dedicatedBuffer
    16261  VK_NULL_HANDLE, // dedicatedImage
    16262  *pCreateInfo,
    16263  VMA_SUBALLOCATION_TYPE_UNKNOWN,
    16264  allocationCount,
    16265  pAllocations);
    16266 
    16267 #if VMA_RECORDING_ENABLED
    16268  if(allocator->GetRecorder() != VMA_NULL)
    16269  {
    16270  allocator->GetRecorder()->RecordAllocateMemoryPages(
    16271  allocator->GetCurrentFrameIndex(),
    16272  *pVkMemoryRequirements,
    16273  *pCreateInfo,
    16274  (uint64_t)allocationCount,
    16275  pAllocations);
    16276  }
    16277 #endif
    16278 
    16279  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
    16280  {
    16281  for(size_t i = 0; i < allocationCount; ++i)
    16282  {
    16283  allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
    16284  }
    16285  }
    16286 
    16287  return result;
    16288 }
    16289 
    16291  VmaAllocator allocator,
    16292  VkBuffer buffer,
    16293  const VmaAllocationCreateInfo* pCreateInfo,
    16294  VmaAllocation* pAllocation,
    16295  VmaAllocationInfo* pAllocationInfo)
    16296 {
    16297  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    16298 
    16299  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
    16300 
    16301  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16302 
    16303  VkMemoryRequirements vkMemReq = {};
    16304  bool requiresDedicatedAllocation = false;
    16305  bool prefersDedicatedAllocation = false;
    16306  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
    16307  requiresDedicatedAllocation,
    16308  prefersDedicatedAllocation);
    16309 
    16310  VkResult result = allocator->AllocateMemory(
    16311  vkMemReq,
    16312  requiresDedicatedAllocation,
    16313  prefersDedicatedAllocation,
    16314  buffer, // dedicatedBuffer
    16315  VK_NULL_HANDLE, // dedicatedImage
    16316  *pCreateInfo,
    16317  VMA_SUBALLOCATION_TYPE_BUFFER,
    16318  1, // allocationCount
    16319  pAllocation);
    16320 
    16321 #if VMA_RECORDING_ENABLED
    16322  if(allocator->GetRecorder() != VMA_NULL)
    16323  {
    16324  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
    16325  allocator->GetCurrentFrameIndex(),
    16326  vkMemReq,
    16327  requiresDedicatedAllocation,
    16328  prefersDedicatedAllocation,
    16329  *pCreateInfo,
    16330  *pAllocation);
    16331  }
    16332 #endif
    16333 
    16334  if(pAllocationInfo && result == VK_SUCCESS)
    16335  {
    16336  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    16337  }
    16338 
    16339  return result;
    16340 }
    16341 
    16342 VkResult vmaAllocateMemoryForImage(
    16343  VmaAllocator allocator,
    16344  VkImage image,
    16345  const VmaAllocationCreateInfo* pCreateInfo,
    16346  VmaAllocation* pAllocation,
    16347  VmaAllocationInfo* pAllocationInfo)
    16348 {
    16349  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    16350 
    16351  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
    16352 
    16353  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16354 
    16355  VkMemoryRequirements vkMemReq = {};
    16356  bool requiresDedicatedAllocation = false;
    16357  bool prefersDedicatedAllocation = false;
    16358  allocator->GetImageMemoryRequirements(image, vkMemReq,
    16359  requiresDedicatedAllocation, prefersDedicatedAllocation);
    16360 
    16361  VkResult result = allocator->AllocateMemory(
    16362  vkMemReq,
    16363  requiresDedicatedAllocation,
    16364  prefersDedicatedAllocation,
    16365  VK_NULL_HANDLE, // dedicatedBuffer
    16366  image, // dedicatedImage
    16367  *pCreateInfo,
    16368  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
    16369  1, // allocationCount
    16370  pAllocation);
    16371 
    16372 #if VMA_RECORDING_ENABLED
    16373  if(allocator->GetRecorder() != VMA_NULL)
    16374  {
    16375  allocator->GetRecorder()->RecordAllocateMemoryForImage(
    16376  allocator->GetCurrentFrameIndex(),
    16377  vkMemReq,
    16378  requiresDedicatedAllocation,
    16379  prefersDedicatedAllocation,
    16380  *pCreateInfo,
    16381  *pAllocation);
    16382  }
    16383 #endif
    16384 
    16385  if(pAllocationInfo && result == VK_SUCCESS)
    16386  {
    16387  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    16388  }
    16389 
    16390  return result;
    16391 }
    16392 
    16393 void vmaFreeMemory(
    16394  VmaAllocator allocator,
    16395  VmaAllocation allocation)
    16396 {
    16397  VMA_ASSERT(allocator);
    16398 
    16399  if(allocation == VK_NULL_HANDLE)
    16400  {
    16401  return;
    16402  }
    16403 
    16404  VMA_DEBUG_LOG("vmaFreeMemory");
    16405 
    16406  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16407 
    16408 #if VMA_RECORDING_ENABLED
    16409  if(allocator->GetRecorder() != VMA_NULL)
    16410  {
    16411  allocator->GetRecorder()->RecordFreeMemory(
    16412  allocator->GetCurrentFrameIndex(),
    16413  allocation);
    16414  }
    16415 #endif
    16416 
    16417  allocator->FreeMemory(
    16418  1, // allocationCount
    16419  &allocation);
    16420 }
    16421 
    16422 void vmaFreeMemoryPages(
    16423  VmaAllocator allocator,
    16424  size_t allocationCount,
    16425  VmaAllocation* pAllocations)
    16426 {
    16427  if(allocationCount == 0)
    16428  {
    16429  return;
    16430  }
    16431 
    16432  VMA_ASSERT(allocator);
    16433 
    16434  VMA_DEBUG_LOG("vmaFreeMemoryPages");
    16435 
    16436  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16437 
    16438 #if VMA_RECORDING_ENABLED
    16439  if(allocator->GetRecorder() != VMA_NULL)
    16440  {
    16441  allocator->GetRecorder()->RecordFreeMemoryPages(
    16442  allocator->GetCurrentFrameIndex(),
    16443  (uint64_t)allocationCount,
    16444  pAllocations);
    16445  }
    16446 #endif
    16447 
    16448  allocator->FreeMemory(allocationCount, pAllocations);
    16449 }
    16450 
    16451 VkResult vmaResizeAllocation(
    16452  VmaAllocator allocator,
    16453  VmaAllocation allocation,
    16454  VkDeviceSize newSize)
    16455 {
    16456  VMA_ASSERT(allocator && allocation);
    16457 
    16458  VMA_DEBUG_LOG("vmaResizeAllocation");
    16459 
    16460  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16461 
    16462 #if VMA_RECORDING_ENABLED
    16463  if(allocator->GetRecorder() != VMA_NULL)
    16464  {
    16465  allocator->GetRecorder()->RecordResizeAllocation(
    16466  allocator->GetCurrentFrameIndex(),
    16467  allocation,
    16468  newSize);
    16469  }
    16470 #endif
    16471 
    16472  return allocator->ResizeAllocation(allocation, newSize);
    16473 }
    16474 
    16476  VmaAllocator allocator,
    16477  VmaAllocation allocation,
    16478  VmaAllocationInfo* pAllocationInfo)
    16479 {
    16480  VMA_ASSERT(allocator && allocation && pAllocationInfo);
    16481 
    16482  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16483 
    16484 #if VMA_RECORDING_ENABLED
    16485  if(allocator->GetRecorder() != VMA_NULL)
    16486  {
    16487  allocator->GetRecorder()->RecordGetAllocationInfo(
    16488  allocator->GetCurrentFrameIndex(),
    16489  allocation);
    16490  }
    16491 #endif
    16492 
    16493  allocator->GetAllocationInfo(allocation, pAllocationInfo);
    16494 }
    16495 
    16496 VkBool32 vmaTouchAllocation(
    16497  VmaAllocator allocator,
    16498  VmaAllocation allocation)
    16499 {
    16500  VMA_ASSERT(allocator && allocation);
    16501 
    16502  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16503 
    16504 #if VMA_RECORDING_ENABLED
    16505  if(allocator->GetRecorder() != VMA_NULL)
    16506  {
    16507  allocator->GetRecorder()->RecordTouchAllocation(
    16508  allocator->GetCurrentFrameIndex(),
    16509  allocation);
    16510  }
    16511 #endif
    16512 
    16513  return allocator->TouchAllocation(allocation);
    16514 }
    16515 
    16517  VmaAllocator allocator,
    16518  VmaAllocation allocation,
    16519  void* pUserData)
    16520 {
    16521  VMA_ASSERT(allocator && allocation);
    16522 
    16523  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16524 
    16525  allocation->SetUserData(allocator, pUserData);
    16526 
    16527 #if VMA_RECORDING_ENABLED
    16528  if(allocator->GetRecorder() != VMA_NULL)
    16529  {
    16530  allocator->GetRecorder()->RecordSetAllocationUserData(
    16531  allocator->GetCurrentFrameIndex(),
    16532  allocation,
    16533  pUserData);
    16534  }
    16535 #endif
    16536 }
    16537 
    16539  VmaAllocator allocator,
    16540  VmaAllocation* pAllocation)
    16541 {
    16542  VMA_ASSERT(allocator && pAllocation);
    16543 
    16544  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
    16545 
    16546  allocator->CreateLostAllocation(pAllocation);
    16547 
    16548 #if VMA_RECORDING_ENABLED
    16549  if(allocator->GetRecorder() != VMA_NULL)
    16550  {
    16551  allocator->GetRecorder()->RecordCreateLostAllocation(
    16552  allocator->GetCurrentFrameIndex(),
    16553  *pAllocation);
    16554  }
    16555 #endif
    16556 }
    16557 
// Public API: maps the allocation's memory and returns a CPU pointer in
// *ppData. Delegates to VmaAllocator_T::Map.
VkResult vmaMapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void** ppData)
{
    VMA_ASSERT(allocator && allocation && ppData);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VkResult res = allocator->Map(allocation, ppData);

#if VMA_RECORDING_ENABLED
    // Recording happens after the actual map; note that the result code is
    // not recorded, only the fact that the call was made.
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordMapMemory(
            allocator->GetCurrentFrameIndex(),
            allocation);
    }
#endif

    return res;
}
    16580 
// Public API: unmaps memory previously mapped with vmaMapMemory.
// Delegates to VmaAllocator_T::Unmap.
void vmaUnmapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation)
{
    VMA_ASSERT(allocator && allocation);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
    // NOTE(review): recording happens BEFORE the actual unmap — the opposite
    // order from vmaMapMemory, presumably so the allocation is still mapped
    // at recording time; confirm against the recorder's requirements.
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordUnmapMemory(
            allocator->GetCurrentFrameIndex(),
            allocation);
    }
#endif

    allocator->Unmap(allocation);
}
    16600 
// Public API: flushes the host cache for a byte range of the allocation,
// delegating to FlushOrInvalidateAllocation with VMA_CACHE_FLUSH.
void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
{
    VMA_ASSERT(allocator && allocation);

    VMA_DEBUG_LOG("vmaFlushAllocation");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);

#if VMA_RECORDING_ENABLED
    // Record after the actual flush.
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordFlushAllocation(
            allocator->GetCurrentFrameIndex(),
            allocation, offset, size);
    }
#endif
}
    16620 
// Public API: invalidates the host cache for a byte range of the allocation,
// delegating to FlushOrInvalidateAllocation with VMA_CACHE_INVALIDATE.
// Mirrors vmaFlushAllocation except for the cache operation.
void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
{
    VMA_ASSERT(allocator && allocation);

    VMA_DEBUG_LOG("vmaInvalidateAllocation");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);

#if VMA_RECORDING_ENABLED
    // Record after the actual invalidation.
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordInvalidateAllocation(
            allocator->GetCurrentFrameIndex(),
            allocation, offset, size);
    }
#endif
}
    16640 
// Public API: checks corruption-detection margins in memory types selected by
// memoryTypeBits. Thin wrapper delegating to VmaAllocator_T::CheckCorruption.
VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
{
    VMA_ASSERT(allocator);

    VMA_DEBUG_LOG("vmaCheckCorruption");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    return allocator->CheckCorruption(memoryTypeBits);
}
    16651 
    16652 VkResult vmaDefragment(
    16653  VmaAllocator allocator,
    16654  VmaAllocation* pAllocations,
    16655  size_t allocationCount,
    16656  VkBool32* pAllocationsChanged,
    16657  const VmaDefragmentationInfo *pDefragmentationInfo,
    16658  VmaDefragmentationStats* pDefragmentationStats)
    16659 {
    16660  // Deprecated interface, reimplemented using new one.
    16661 
    16662  VmaDefragmentationInfo2 info2 = {};
    16663  info2.allocationCount = (uint32_t)allocationCount;
    16664  info2.pAllocations = pAllocations;
    16665  info2.pAllocationsChanged = pAllocationsChanged;
    16666  if(pDefragmentationInfo != VMA_NULL)
    16667  {
    16668  info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
    16669  info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove;
    16670  }
    16671  else
    16672  {
    16673  info2.maxCpuAllocationsToMove = UINT32_MAX;
    16674  info2.maxCpuBytesToMove = VK_WHOLE_SIZE;
    16675  }
    16676  // info2.flags, maxGpuAllocationsToMove, maxGpuBytesToMove, commandBuffer deliberately left zero.
    16677 
    16679  VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx);
    16680  if(res == VK_NOT_READY)
    16681  {
    16682  res = vmaDefragmentationEnd( allocator, ctx);
    16683  }
    16684  return res;
    16685 }
    16686 
    16687 VkResult vmaDefragmentationBegin(
    16688  VmaAllocator allocator,
    16689  const VmaDefragmentationInfo2* pInfo,
    16690  VmaDefragmentationStats* pStats,
    16691  VmaDefragmentationContext *pContext)
    16692 {
    16693  VMA_ASSERT(allocator && pInfo && pContext);
    16694 
    16695  // Degenerate case: Nothing to defragment.
    16696  if(pInfo->allocationCount == 0 && pInfo->poolCount == 0)
    16697  {
    16698  return VK_SUCCESS;
    16699  }
    16700 
    16701  VMA_ASSERT(pInfo->allocationCount == 0 || pInfo->pAllocations != VMA_NULL);
    16702  VMA_ASSERT(pInfo->poolCount == 0 || pInfo->pPools != VMA_NULL);
    16703  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->allocationCount, pInfo->pAllocations));
    16704  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->poolCount, pInfo->pPools));
    16705 
    16706  VMA_DEBUG_LOG("vmaDefragmentationBegin");
    16707 
    16708  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16709 
    16710  VkResult res = allocator->DefragmentationBegin(*pInfo, pStats, pContext);
    16711 
    16712 #if VMA_RECORDING_ENABLED
    16713  if(allocator->GetRecorder() != VMA_NULL)
    16714  {
    16715  allocator->GetRecorder()->RecordDefragmentationBegin(
    16716  allocator->GetCurrentFrameIndex(), *pInfo, *pContext);
    16717  }
    16718 #endif
    16719 
    16720  return res;
    16721 }
    16722 
    16723 VkResult vmaDefragmentationEnd(
    16724  VmaAllocator allocator,
    16725  VmaDefragmentationContext context)
    16726 {
    16727  VMA_ASSERT(allocator);
    16728 
    16729  VMA_DEBUG_LOG("vmaDefragmentationEnd");
    16730 
    16731  if(context != VK_NULL_HANDLE)
    16732  {
    16733  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16734 
    16735 #if VMA_RECORDING_ENABLED
    16736  if(allocator->GetRecorder() != VMA_NULL)
    16737  {
    16738  allocator->GetRecorder()->RecordDefragmentationEnd(
    16739  allocator->GetCurrentFrameIndex(), context);
    16740  }
    16741 #endif
    16742 
    16743  return allocator->DefragmentationEnd(context);
    16744  }
    16745  else
    16746  {
    16747  return VK_SUCCESS;
    16748  }
    16749 }
    16750 
// Public API: binds the given buffer to the allocation's memory at the
// allocation's offset. Thin wrapper over VmaAllocator_T::BindBufferMemory.
VkResult vmaBindBufferMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkBuffer buffer)
{
    VMA_ASSERT(allocator && allocation && buffer);

    VMA_DEBUG_LOG("vmaBindBufferMemory");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    return allocator->BindBufferMemory(allocation, buffer);
}
    16764 
// Public API: binds the given image to the allocation's memory at the
// allocation's offset. Thin wrapper over VmaAllocator_T::BindImageMemory.
VkResult vmaBindImageMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkImage image)
{
    VMA_ASSERT(allocator && allocation && image);

    VMA_DEBUG_LOG("vmaBindImageMemory");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    return allocator->BindImageMemory(allocation, image);
}
    16778 
// Public API: creates a VkBuffer, allocates memory for it and binds them
// together in one call. On success, *pBuffer and *pAllocation are the new
// handles and (optionally) *pAllocationInfo is filled. On any failure,
// everything created so far is rolled back and both output handles are
// reset to VK_NULL_HANDLE.
VkResult vmaCreateBuffer(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);

    // Zero-size buffers are rejected up front rather than passed to Vulkan.
    if(pBufferCreateInfo->size == 0)
    {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VMA_DEBUG_LOG("vmaCreateBuffer");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    // Outputs start null so error paths leave well-defined values.
    *pBuffer = VK_NULL_HANDLE;
    *pAllocation = VK_NULL_HANDLE;

    // 1. Create VkBuffer.
    VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
        allocator->m_hDevice,
        pBufferCreateInfo,
        allocator->GetAllocationCallbacks(),
        pBuffer);
    if(res >= 0)
    {
        // 2. vkGetBufferMemoryRequirements.
        VkMemoryRequirements vkMemReq = {};
        bool requiresDedicatedAllocation = false;
        bool prefersDedicatedAllocation = false;
        allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
            requiresDedicatedAllocation, prefersDedicatedAllocation);

        // Make sure alignment requirements for specific buffer usages reported
        // in Physical Device Properties are included in alignment reported by memory requirements.
        if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
        {
            VMA_ASSERT(vkMemReq.alignment %
                allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
        }
        if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
        {
            VMA_ASSERT(vkMemReq.alignment %
                allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
        }
        if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
        {
            VMA_ASSERT(vkMemReq.alignment %
                allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
        }

        // 3. Allocate memory using allocator.
        res = allocator->AllocateMemory(
            vkMemReq,
            requiresDedicatedAllocation,
            prefersDedicatedAllocation,
            *pBuffer, // dedicatedBuffer
            VK_NULL_HANDLE, // dedicatedImage
            *pAllocationCreateInfo,
            VMA_SUBALLOCATION_TYPE_BUFFER,
            1, // allocationCount
            pAllocation);

#if VMA_RECORDING_ENABLED
        // Recording happens even if allocation failed (res < 0 here is possible).
        if(allocator->GetRecorder() != VMA_NULL)
        {
            allocator->GetRecorder()->RecordCreateBuffer(
                allocator->GetCurrentFrameIndex(),
                *pBufferCreateInfo,
                *pAllocationCreateInfo,
                *pAllocation);
        }
#endif

        if(res >= 0)
        {
            // 4. Bind buffer with memory (skipped if the caller asked for DONT_BIND).
            if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
            {
                res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
            }
            if(res >= 0)
            {
                // All steps succeeded.
                #if VMA_STATS_STRING_ENABLED
                (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
                #endif
                if(pAllocationInfo != VMA_NULL)
                {
                    allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
                }

                return VK_SUCCESS;
            }
            // Bind failed: roll back allocation and buffer.
            allocator->FreeMemory(
                1, // allocationCount
                pAllocation);
            *pAllocation = VK_NULL_HANDLE;
            (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
            *pBuffer = VK_NULL_HANDLE;
            return res;
        }
        // Allocation failed: roll back buffer creation.
        (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
        *pBuffer = VK_NULL_HANDLE;
        return res;
    }
    return res;
}
    16891 
// Public API: destroys a buffer and frees its allocation. Either handle may
// be VK_NULL_HANDLE; if both are null the call is a no-op.
void vmaDestroyBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    VmaAllocation allocation)
{
    VMA_ASSERT(allocator);

    if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    {
        return;
    }

    VMA_DEBUG_LOG("vmaDestroyBuffer");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
    // Record before destruction while the allocation handle is still valid.
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordDestroyBuffer(
            allocator->GetCurrentFrameIndex(),
            allocation);
    }
#endif

    // Destroy the buffer first, then release its memory.
    if(buffer != VK_NULL_HANDLE)
    {
        (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
    }

    if(allocation != VK_NULL_HANDLE)
    {
        allocator->FreeMemory(
            1, // allocationCount
            &allocation);
    }
}
    16929 
// Public API: creates a VkImage, allocates memory for it and binds them
// together in one call. Mirrors vmaCreateBuffer. On any failure everything
// created so far is rolled back and both output handles reset to
// VK_NULL_HANDLE.
VkResult vmaCreateImage(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkImage* pImage,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);

    // Degenerate images (any zero dimension/mip/layer count) are rejected
    // up front rather than passed to Vulkan.
    if(pImageCreateInfo->extent.width == 0 ||
        pImageCreateInfo->extent.height == 0 ||
        pImageCreateInfo->extent.depth == 0 ||
        pImageCreateInfo->mipLevels == 0 ||
        pImageCreateInfo->arrayLayers == 0)
    {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VMA_DEBUG_LOG("vmaCreateImage");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    // Outputs start null so error paths leave well-defined values.
    *pImage = VK_NULL_HANDLE;
    *pAllocation = VK_NULL_HANDLE;

    // 1. Create VkImage.
    VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
        allocator->m_hDevice,
        pImageCreateInfo,
        allocator->GetAllocationCallbacks(),
        pImage);
    if(res >= 0)
    {
        // Optimal-tiling and linear-tiling images use distinct suballocation
        // types (they must not alias within a block).
        VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
            VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
            VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;

        // 2. Allocate memory using allocator.
        VkMemoryRequirements vkMemReq = {};
        bool requiresDedicatedAllocation = false;
        bool prefersDedicatedAllocation = false;
        allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
            requiresDedicatedAllocation, prefersDedicatedAllocation);

        res = allocator->AllocateMemory(
            vkMemReq,
            requiresDedicatedAllocation,
            prefersDedicatedAllocation,
            VK_NULL_HANDLE, // dedicatedBuffer
            *pImage, // dedicatedImage
            *pAllocationCreateInfo,
            suballocType,
            1, // allocationCount
            pAllocation);

#if VMA_RECORDING_ENABLED
        // Recording happens even if allocation failed (res < 0 here is possible).
        if(allocator->GetRecorder() != VMA_NULL)
        {
            allocator->GetRecorder()->RecordCreateImage(
                allocator->GetCurrentFrameIndex(),
                *pImageCreateInfo,
                *pAllocationCreateInfo,
                *pAllocation);
        }
#endif

        if(res >= 0)
        {
            // 3. Bind image with memory (skipped if the caller asked for DONT_BIND).
            if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
            {
                res = allocator->BindImageMemory(*pAllocation, *pImage);
            }
            if(res >= 0)
            {
                // All steps succeeded.
                #if VMA_STATS_STRING_ENABLED
                (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
                #endif
                if(pAllocationInfo != VMA_NULL)
                {
                    allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
                }

                return VK_SUCCESS;
            }
            // Bind failed: roll back allocation and image.
            allocator->FreeMemory(
                1, // allocationCount
                pAllocation);
            *pAllocation = VK_NULL_HANDLE;
            (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
            *pImage = VK_NULL_HANDLE;
            return res;
        }
        // Allocation failed: roll back image creation.
        (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
        *pImage = VK_NULL_HANDLE;
        return res;
    }
    return res;
}
    17031 
// Public API: destroys an image and frees its allocation. Either handle may
// be VK_NULL_HANDLE; if both are null the call is a no-op.
void vmaDestroyImage(
    VmaAllocator allocator,
    VkImage image,
    VmaAllocation allocation)
{
    VMA_ASSERT(allocator);

    if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    {
        return;
    }

    VMA_DEBUG_LOG("vmaDestroyImage");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
    // Record before destruction while the allocation handle is still valid.
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordDestroyImage(
            allocator->GetCurrentFrameIndex(),
            allocation);
    }
#endif

    // Destroy the image first, then release its memory.
    if(image != VK_NULL_HANDLE)
    {
        (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
    }
    if(allocation != VK_NULL_HANDLE)
    {
        allocator->FreeMemory(
            1, // allocationCount
            &allocation);
    }
}
    17068 
    17069 #endif // #ifdef VMA_IMPLEMENTATION
// (Doxygen hover-text residue from the HTML source view, reformatted for readability:)
// PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties — Definition: vk_mem_alloc.h:1786
// "Set this flag if the allocation should have its own memory block." — Definition: vk_mem_alloc.h:2086
// Go to the documentation of this file.
    1 //
    2 // Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
    3 //
    4 // Permission is hereby granted, free of charge, to any person obtaining a copy
    5 // of this software and associated documentation files (the "Software"), to deal
    6 // in the Software without restriction, including without limitation the rights
    7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    8 // copies of the Software, and to permit persons to whom the Software is
    9 // furnished to do so, subject to the following conditions:
    10 //
    11 // The above copyright notice and this permission notice shall be included in
    12 // all copies or substantial portions of the Software.
    13 //
    14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    20 // THE SOFTWARE.
    21 //
    22 
    23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
    24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
    25 
    26 #ifdef __cplusplus
    27 extern "C" {
    28 #endif
    29 
    1678 /*
    1679 Define this macro to 0/1 to disable/enable support for recording functionality,
    1680 available through VmaAllocatorCreateInfo::pRecordSettings.
    1681 */
    1682 #ifndef VMA_RECORDING_ENABLED
    1683  #ifdef _WIN32
    1684  #define VMA_RECORDING_ENABLED 1
    1685  #else
    1686  #define VMA_RECORDING_ENABLED 0
    1687  #endif
    1688 #endif
    1689 
    1690 #ifndef NOMINMAX
    1691  #define NOMINMAX // For windows.h
    1692 #endif
    1693 
    1694 #ifndef VULKAN_H_
    1695  #include <vulkan/vulkan.h>
    1696 #endif
    1697 
    1698 #if VMA_RECORDING_ENABLED
    1699  #include <windows.h>
    1700 #endif
    1701 
    1702 #if !defined(VMA_DEDICATED_ALLOCATION)
    1703  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
    1704  #define VMA_DEDICATED_ALLOCATION 1
    1705  #else
    1706  #define VMA_DEDICATED_ALLOCATION 0
    1707  #endif
    1708 #endif
    1709 
    1719 VK_DEFINE_HANDLE(VmaAllocator)
    1720 
    1721 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    1723  VmaAllocator allocator,
    1724  uint32_t memoryType,
    1725  VkDeviceMemory memory,
    1726  VkDeviceSize size);
    1728 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    1729  VmaAllocator allocator,
    1730  uint32_t memoryType,
    1731  VkDeviceMemory memory,
    1732  VkDeviceSize size);
    1733 
    1747 
    1777 
    1780 typedef VkFlags VmaAllocatorCreateFlags;
    1781 
    1786 typedef struct VmaVulkanFunctions {
    1787  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    1788  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    1789  PFN_vkAllocateMemory vkAllocateMemory;
    1790  PFN_vkFreeMemory vkFreeMemory;
    1791  PFN_vkMapMemory vkMapMemory;
    1792  PFN_vkUnmapMemory vkUnmapMemory;
    1793  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
    1794  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
    1795  PFN_vkBindBufferMemory vkBindBufferMemory;
    1796  PFN_vkBindImageMemory vkBindImageMemory;
    1797  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    1798  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    1799  PFN_vkCreateBuffer vkCreateBuffer;
    1800  PFN_vkDestroyBuffer vkDestroyBuffer;
    1801  PFN_vkCreateImage vkCreateImage;
    1802  PFN_vkDestroyImage vkDestroyImage;
    1803  PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
    1804 #if VMA_DEDICATED_ALLOCATION
    1805  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    1806  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
    1807 #endif
    1809 
    1811 typedef enum VmaRecordFlagBits {
    1818 
    1821 typedef VkFlags VmaRecordFlags;
    1822 
    1824 typedef struct VmaRecordSettings
    1825 {
    1835  const char* pFilePath;
    1837 
    1840 {
    1844 
    1845  VkPhysicalDevice physicalDevice;
    1847 
    1848  VkDevice device;
    1850 
    1853 
    1854  const VkAllocationCallbacks* pAllocationCallbacks;
    1856 
    1896  const VkDeviceSize* pHeapSizeLimit;
    1917 
    1919 VkResult vmaCreateAllocator(
    1920  const VmaAllocatorCreateInfo* pCreateInfo,
    1921  VmaAllocator* pAllocator);
    1922 
    1924 void vmaDestroyAllocator(
    1925  VmaAllocator allocator);
    1926 
    1932  VmaAllocator allocator,
    1933  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
    1934 
    1940  VmaAllocator allocator,
    1941  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
    1942 
    1950  VmaAllocator allocator,
    1951  uint32_t memoryTypeIndex,
    1952  VkMemoryPropertyFlags* pFlags);
    1953 
    1963  VmaAllocator allocator,
    1964  uint32_t frameIndex);
    1965 
    1968 typedef struct VmaStatInfo
    1969 {
    1971  uint32_t blockCount;
    1977  VkDeviceSize usedBytes;
    1979  VkDeviceSize unusedBytes;
    1982 } VmaStatInfo;
    1983 
    1985 typedef struct VmaStats
    1986 {
    1987  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    1988  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    1990 } VmaStats;
    1991 
    1993 void vmaCalculateStats(
    1994  VmaAllocator allocator,
    1995  VmaStats* pStats);
    1996 
    1997 #ifndef VMA_STATS_STRING_ENABLED
    1998 #define VMA_STATS_STRING_ENABLED 1
    1999 #endif
    2000 
    2001 #if VMA_STATS_STRING_ENABLED
    2002 
    2004 
    2006 void vmaBuildStatsString(
    2007  VmaAllocator allocator,
    2008  char** ppStatsString,
    2009  VkBool32 detailedMap);
    2010 
    2011 void vmaFreeStatsString(
    2012  VmaAllocator allocator,
    2013  char* pStatsString);
    2014 
    2015 #endif // #if VMA_STATS_STRING_ENABLED
    2016 
    2025 VK_DEFINE_HANDLE(VmaPool)
    2026 
    2027 typedef enum VmaMemoryUsage
    2028 {
    2077 } VmaMemoryUsage;
    2078 
    2088 
    2149 
    2165 
    2175 
    2182 
    2186 
    2188 {
    2201  VkMemoryPropertyFlags requiredFlags;
    2206  VkMemoryPropertyFlags preferredFlags;
    2214  uint32_t memoryTypeBits;
    2227  void* pUserData;
    2229 
    2246 VkResult vmaFindMemoryTypeIndex(
    2247  VmaAllocator allocator,
    2248  uint32_t memoryTypeBits,
    2249  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2250  uint32_t* pMemoryTypeIndex);
    2251 
    2265  VmaAllocator allocator,
    2266  const VkBufferCreateInfo* pBufferCreateInfo,
    2267  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2268  uint32_t* pMemoryTypeIndex);
    2269 
    2283  VmaAllocator allocator,
    2284  const VkImageCreateInfo* pImageCreateInfo,
    2285  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2286  uint32_t* pMemoryTypeIndex);
    2287 
    2308 
    2325 
    2336 
    2342 
    2345 typedef VkFlags VmaPoolCreateFlags;
    2346 
    2349 typedef struct VmaPoolCreateInfo {
    2364  VkDeviceSize blockSize;
    2393 
    2396 typedef struct VmaPoolStats {
    2399  VkDeviceSize size;
    2402  VkDeviceSize unusedSize;
    2415  VkDeviceSize unusedRangeSizeMax;
    2418  size_t blockCount;
    2419 } VmaPoolStats;
    2420 
    2427 VkResult vmaCreatePool(
    2428  VmaAllocator allocator,
    2429  const VmaPoolCreateInfo* pCreateInfo,
    2430  VmaPool* pPool);
    2431 
    2434 void vmaDestroyPool(
    2435  VmaAllocator allocator,
    2436  VmaPool pool);
    2437 
    2444 void vmaGetPoolStats(
    2445  VmaAllocator allocator,
    2446  VmaPool pool,
    2447  VmaPoolStats* pPoolStats);
    2448 
    2456  VmaAllocator allocator,
    2457  VmaPool pool,
    2458  size_t* pLostAllocationCount);
    2459 
    2474 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
    2475 
    2500 VK_DEFINE_HANDLE(VmaAllocation)
    2501 
    2502 
    2504 typedef struct VmaAllocationInfo {
    2509  uint32_t memoryType;
    2518  VkDeviceMemory deviceMemory;
    2523  VkDeviceSize offset;
    2528  VkDeviceSize size;
    2542  void* pUserData;
    2544 
    2555 VkResult vmaAllocateMemory(
    2556  VmaAllocator allocator,
    2557  const VkMemoryRequirements* pVkMemoryRequirements,
    2558  const VmaAllocationCreateInfo* pCreateInfo,
    2559  VmaAllocation* pAllocation,
    2560  VmaAllocationInfo* pAllocationInfo);
    2561 
    2581 VkResult vmaAllocateMemoryPages(
    2582  VmaAllocator allocator,
    2583  const VkMemoryRequirements* pVkMemoryRequirements,
    2584  const VmaAllocationCreateInfo* pCreateInfo,
    2585  size_t allocationCount,
    2586  VmaAllocation* pAllocations,
    2587  VmaAllocationInfo* pAllocationInfo);
    2588 
    2596  VmaAllocator allocator,
    2597  VkBuffer buffer,
    2598  const VmaAllocationCreateInfo* pCreateInfo,
    2599  VmaAllocation* pAllocation,
    2600  VmaAllocationInfo* pAllocationInfo);
    2601 
    2603 VkResult vmaAllocateMemoryForImage(
    2604  VmaAllocator allocator,
    2605  VkImage image,
    2606  const VmaAllocationCreateInfo* pCreateInfo,
    2607  VmaAllocation* pAllocation,
    2608  VmaAllocationInfo* pAllocationInfo);
    2609 
    2614 void vmaFreeMemory(
    2615  VmaAllocator allocator,
    2616  VmaAllocation allocation);
    2617 
    2628 void vmaFreeMemoryPages(
    2629  VmaAllocator allocator,
    2630  size_t allocationCount,
    2631  VmaAllocation* pAllocations);
    2632 
    2653 VkResult vmaResizeAllocation(
    2654  VmaAllocator allocator,
    2655  VmaAllocation allocation,
    2656  VkDeviceSize newSize);
    2657 
    2675  VmaAllocator allocator,
    2676  VmaAllocation allocation,
    2677  VmaAllocationInfo* pAllocationInfo);
    2678 
    2693 VkBool32 vmaTouchAllocation(
    2694  VmaAllocator allocator,
    2695  VmaAllocation allocation);
    2696 
    2711  VmaAllocator allocator,
    2712  VmaAllocation allocation,
    2713  void* pUserData);
    2714 
    2726  VmaAllocator allocator,
    2727  VmaAllocation* pAllocation);
    2728 
    2763 VkResult vmaMapMemory(
    2764  VmaAllocator allocator,
    2765  VmaAllocation allocation,
    2766  void** ppData);
    2767 
    2772 void vmaUnmapMemory(
    2773  VmaAllocator allocator,
    2774  VmaAllocation allocation);
    2775 
    2792 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2793 
    2810 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2811 
    2828 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
    2829 
    2836 VK_DEFINE_HANDLE(VmaDefragmentationContext)
    2837 
    2838 typedef enum VmaDefragmentationFlagBits {
    2842 typedef VkFlags VmaDefragmentationFlags;
    2843 
    2848 typedef struct VmaDefragmentationInfo2 {
    2872  uint32_t poolCount;
    2893  VkDeviceSize maxCpuBytesToMove;
    2903  VkDeviceSize maxGpuBytesToMove;
    2917  VkCommandBuffer commandBuffer;
    2919 
    2924 typedef struct VmaDefragmentationInfo {
    2929  VkDeviceSize maxBytesToMove;
    2936 
    2938 typedef struct VmaDefragmentationStats {
    2940  VkDeviceSize bytesMoved;
    2942  VkDeviceSize bytesFreed;
    2948 
    2978 VkResult vmaDefragmentationBegin(
    2979  VmaAllocator allocator,
    2980  const VmaDefragmentationInfo2* pInfo,
    2981  VmaDefragmentationStats* pStats,
    2982  VmaDefragmentationContext *pContext);
    2983 
    2989 VkResult vmaDefragmentationEnd(
    2990  VmaAllocator allocator,
    2991  VmaDefragmentationContext context);
    2992 
    3033 VkResult vmaDefragment(
    3034  VmaAllocator allocator,
    3035  VmaAllocation* pAllocations,
    3036  size_t allocationCount,
    3037  VkBool32* pAllocationsChanged,
    3038  const VmaDefragmentationInfo *pDefragmentationInfo,
    3039  VmaDefragmentationStats* pDefragmentationStats);
    3040 
    3053 VkResult vmaBindBufferMemory(
    3054  VmaAllocator allocator,
    3055  VmaAllocation allocation,
    3056  VkBuffer buffer);
    3057 
    3070 VkResult vmaBindImageMemory(
    3071  VmaAllocator allocator,
    3072  VmaAllocation allocation,
    3073  VkImage image);
    3074 
    3101 VkResult vmaCreateBuffer(
    3102  VmaAllocator allocator,
    3103  const VkBufferCreateInfo* pBufferCreateInfo,
    3104  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    3105  VkBuffer* pBuffer,
    3106  VmaAllocation* pAllocation,
    3107  VmaAllocationInfo* pAllocationInfo);
    3108 
    3120 void vmaDestroyBuffer(
    3121  VmaAllocator allocator,
    3122  VkBuffer buffer,
    3123  VmaAllocation allocation);
    3124 
    3126 VkResult vmaCreateImage(
    3127  VmaAllocator allocator,
    3128  const VkImageCreateInfo* pImageCreateInfo,
    3129  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    3130  VkImage* pImage,
    3131  VmaAllocation* pAllocation,
    3132  VmaAllocationInfo* pAllocationInfo);
    3133 
    3145 void vmaDestroyImage(
    3146  VmaAllocator allocator,
    3147  VkImage image,
    3148  VmaAllocation allocation);
    3149 
    3150 #ifdef __cplusplus
    3151 }
    3152 #endif
    3153 
    3154 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
    3155 
    3156 // For Visual Studio IntelliSense.
    3157 #if defined(__cplusplus) && defined(__INTELLISENSE__)
    3158 #define VMA_IMPLEMENTATION
    3159 #endif
    3160 
    3161 #ifdef VMA_IMPLEMENTATION
    3162 #undef VMA_IMPLEMENTATION
    3163 
    3164 #include <cstdint>
    3165 #include <cstdlib>
    3166 #include <cstring>
    3167 
    3168 /*******************************************************************************
    3169 CONFIGURATION SECTION
    3170 
Define some of these macros before each #include of this header or change them
here if you need behavior other than the default, depending on your environment.
    3173 */
    3174 
    3175 /*
    3176 Define this macro to 1 to make the library fetch pointers to Vulkan functions
    3177 internally, like:
    3178 
    3179  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    3180 
Define to 0 if you are going to provide your own pointers to Vulkan functions via
VmaAllocatorCreateInfo::pVulkanFunctions.
    3183 */
    3184 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
    3185 #define VMA_STATIC_VULKAN_FUNCTIONS 1
    3186 #endif
    3187 
    3188 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
    3189 //#define VMA_USE_STL_CONTAINERS 1
    3190 
    3191 /* Set this macro to 1 to make the library including and using STL containers:
    3192 std::pair, std::vector, std::list, std::unordered_map.
    3193 
    3194 Set it to 0 or undefined to make the library using its own implementation of
    3195 the containers.
    3196 */
    3197 #if VMA_USE_STL_CONTAINERS
    3198  #define VMA_USE_STL_VECTOR 1
    3199  #define VMA_USE_STL_UNORDERED_MAP 1
    3200  #define VMA_USE_STL_LIST 1
    3201 #endif
    3202 
    3203 #ifndef VMA_USE_STL_SHARED_MUTEX
    3204  // Compiler conforms to C++17.
    3205  #if __cplusplus >= 201703L
    3206  #define VMA_USE_STL_SHARED_MUTEX 1
    3207  // Visual studio defines __cplusplus properly only when passed additional parameter: /Zc:__cplusplus
 // Otherwise it's always 199711L, even though shared_mutex has worked since Visual Studio 2015 Update 2.
    3209  // See: https://blogs.msdn.microsoft.com/vcblog/2018/04/09/msvc-now-correctly-reports-__cplusplus/
    3210  #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
    3211  #define VMA_USE_STL_SHARED_MUTEX 1
    3212  #else
    3213  #define VMA_USE_STL_SHARED_MUTEX 0
    3214  #endif
    3215 #endif
    3216 
    3217 /*
    3218 THESE INCLUDES ARE NOT ENABLED BY DEFAULT.
    3219 Library has its own container implementation.
    3220 */
    3221 #if VMA_USE_STL_VECTOR
    3222  #include <vector>
    3223 #endif
    3224 
    3225 #if VMA_USE_STL_UNORDERED_MAP
    3226  #include <unordered_map>
    3227 #endif
    3228 
    3229 #if VMA_USE_STL_LIST
    3230  #include <list>
    3231 #endif
    3232 
    3233 /*
    3234 Following headers are used in this CONFIGURATION section only, so feel free to
    3235 remove them if not needed.
    3236 */
    3237 #include <cassert> // for assert
    3238 #include <algorithm> // for min, max
    3239 #include <mutex>
    3240 
    3241 #ifndef VMA_NULL
    3242  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    3243  #define VMA_NULL nullptr
    3244 #endif
    3245 
    3246 #if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
    3247 #include <cstdlib>
// Android API < 16 (see the enclosing #if) lacks aligned_alloc();
// emulate it with memalign(). Note: memory from memalign() is freed with free().
void *aligned_alloc(size_t alignment, size_t size)
{
    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    return memalign(alignment, size);
}
    3258 #elif defined(__APPLE__) || defined(__ANDROID__)
    3259 #include <cstdlib>
// Apple/Android fallback: emulate aligned_alloc() with posix_memalign().
// Returns VMA_NULL on failure (posix_memalign returns nonzero errno-style code).
void *aligned_alloc(size_t alignment, size_t size)
{
    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    void *pointer;
    if(posix_memalign(&pointer, alignment, size) == 0)
        return pointer;
    return VMA_NULL;
}
    3273 #endif
    3274 
// If your compiler is not compatible with C++11 and the definition of the
// aligned_alloc() function is missing, uncommenting the following line may help:
    3277 
    3278 //#include <malloc.h>
    3279 
    3280 // Normal assert to check for programmer's errors, especially in Debug configuration.
    3281 #ifndef VMA_ASSERT
    3282  #ifdef _DEBUG
    3283  #define VMA_ASSERT(expr) assert(expr)
    3284  #else
    3285  #define VMA_ASSERT(expr)
    3286  #endif
    3287 #endif
    3288 
    3289 // Assert that will be called very often, like inside data structures e.g. operator[].
    3290 // Making it non-empty can make program slow.
    3291 #ifndef VMA_HEAVY_ASSERT
    3292  #ifdef _DEBUG
    3293  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    3294  #else
    3295  #define VMA_HEAVY_ASSERT(expr)
    3296  #endif
    3297 #endif
    3298 
    3299 #ifndef VMA_ALIGN_OF
    3300  #define VMA_ALIGN_OF(type) (__alignof(type))
    3301 #endif
    3302 
    3303 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
    3304  #if defined(_WIN32)
    3305  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    3306  #else
    3307  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
    3308  #endif
    3309 #endif
    3310 
    3311 #ifndef VMA_SYSTEM_FREE
    3312  #if defined(_WIN32)
    3313  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    3314  #else
    3315  #define VMA_SYSTEM_FREE(ptr) free(ptr)
    3316  #endif
    3317 #endif
    3318 
    3319 #ifndef VMA_MIN
    3320  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
    3321 #endif
    3322 
    3323 #ifndef VMA_MAX
    3324  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
    3325 #endif
    3326 
    3327 #ifndef VMA_SWAP
    3328  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
    3329 #endif
    3330 
    3331 #ifndef VMA_SORT
    3332  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
    3333 #endif
    3334 
    3335 #ifndef VMA_DEBUG_LOG
    3336  #define VMA_DEBUG_LOG(format, ...)
    3337  /*
    3338  #define VMA_DEBUG_LOG(format, ...) do { \
    3339  printf(format, __VA_ARGS__); \
    3340  printf("\n"); \
    3341  } while(false)
    3342  */
    3343 #endif
    3344 
    3345 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
    3346 #if VMA_STATS_STRING_ENABLED
    // Formats a 32-bit unsigned number as decimal text into outStr (truncates to strLen).
    static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    {
        const unsigned int value = static_cast<unsigned int>(num);
        snprintf(outStr, strLen, "%u", value);
    }
    // Formats a 64-bit unsigned number as decimal text into outStr (truncates to strLen).
    static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    {
        const unsigned long long value = static_cast<unsigned long long>(num);
        snprintf(outStr, strLen, "%llu", value);
    }
    // Formats a pointer value as implementation-defined "%p" text into outStr.
    static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    {
        const void* const value = ptr;
        snprintf(outStr, strLen, "%p", value);
    }
    3359 #endif
    3360 
    3361 #ifndef VMA_MUTEX
    // Thin wrapper over std::mutex exposing the Lock()/Unlock() interface
    // expected by the VMA_MUTEX macro.
    class VmaMutex
    {
    public:
        void Lock() { m_Mtx.lock(); }
        void Unlock() { m_Mtx.unlock(); }
    private:
        std::mutex m_Mtx;
    };
    3370  #define VMA_MUTEX VmaMutex
    3371 #endif
    3372 
    3373 // Read-write mutex, where "read" is shared access, "write" is exclusive access.
    3374 #ifndef VMA_RW_MUTEX
    3375  #if VMA_USE_STL_SHARED_MUTEX
    3376  // Use std::shared_mutex from C++17.
    3377  #include <shared_mutex>
    // Read-write mutex backed by C++17 std::shared_mutex:
    // shared (read) lock for readers, exclusive (write) lock for writers.
    class VmaRWMutex
    {
    public:
        void LockRead() { m_SharedMutex.lock_shared(); }
        void UnlockRead() { m_SharedMutex.unlock_shared(); }
        void LockWrite() { m_SharedMutex.lock(); }
        void UnlockWrite() { m_SharedMutex.unlock(); }
    private:
        std::shared_mutex m_SharedMutex;
    };
    3388  #define VMA_RW_MUTEX VmaRWMutex
    3389  #elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600
 // Use SRWLOCK from WinAPI.
 // Minimum supported client = Windows Vista, server = Windows Server 2008.
 class VmaRWMutex
 {
 public:
    // SRWLOCK only needs initialization; WinAPI defines no destroy function for it.
    VmaRWMutex() { InitializeSRWLock(&m_Lock); }
    void LockRead() { AcquireSRWLockShared(&m_Lock); }
    void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
    void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
    void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
 private:
    SRWLOCK m_Lock;
 };
    3403  #define VMA_RW_MUTEX VmaRWMutex
    3404  #else
 // Less efficient fallback: Use normal mutex.
 // Readers and writers all contend on the same exclusive lock, so concurrent
 // readers are serialized - correct, just slower than a true RW lock.
 class VmaRWMutex
 {
 public:
    void LockRead() { m_Mutex.Lock(); }
    void UnlockRead() { m_Mutex.Unlock(); }
    void LockWrite() { m_Mutex.Lock(); }
    void UnlockWrite() { m_Mutex.Unlock(); }
 private:
    VMA_MUTEX m_Mutex;
 };
    3416  #define VMA_RW_MUTEX VmaRWMutex
    3417  #endif // #if VMA_USE_STL_SHARED_MUTEX
    3418 #endif // #ifndef VMA_RW_MUTEX
    3419 
    3420 /*
    3421 If providing your own implementation, you need to implement a subset of std::atomic:
    3422 
    3423 - Constructor(uint32_t desired)
    3424 - uint32_t load() const
    3425 - void store(uint32_t desired)
    3426 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
    3427 */
    3428 #ifndef VMA_ATOMIC_UINT32
    3429  #include <atomic>
    3430  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
    3431 #endif
    3432 
    3433 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    3434 
    3438  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
    3439 #endif
    3440 
    3441 #ifndef VMA_DEBUG_ALIGNMENT
    3442 
    3446  #define VMA_DEBUG_ALIGNMENT (1)
    3447 #endif
    3448 
    3449 #ifndef VMA_DEBUG_MARGIN
    3450 
    3454  #define VMA_DEBUG_MARGIN (0)
    3455 #endif
    3456 
    3457 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
    3458 
    3462  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
    3463 #endif
    3464 
    3465 #ifndef VMA_DEBUG_DETECT_CORRUPTION
    3466 
    3471  #define VMA_DEBUG_DETECT_CORRUPTION (0)
    3472 #endif
    3473 
    3474 #ifndef VMA_DEBUG_GLOBAL_MUTEX
    3475 
    3479  #define VMA_DEBUG_GLOBAL_MUTEX (0)
    3480 #endif
    3481 
    3482 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    3483 
    3487  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
    3488 #endif
    3489 
    3490 #ifndef VMA_SMALL_HEAP_MAX_SIZE
    3491  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
    3493 #endif
    3494 
    3495 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    3496  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
    3498 #endif
    3499 
    3500 #ifndef VMA_CLASS_NO_COPY
    3501  #define VMA_CLASS_NO_COPY(className) \
    3502  private: \
    3503  className(const className&) = delete; \
    3504  className& operator=(const className&) = delete;
    3505 #endif
    3506 
    3507 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
    3508 
    3509 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
    3510 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
    3511 
    3512 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
    3513 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
    3514 
    3515 /*******************************************************************************
    3516 END OF CONFIGURATION
    3517 */
    3518 
    3519 static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;
    3520 
    3521 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    3522  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
    3523 
    3524 // Returns number of bits set to 1 in (v).
    3525 static inline uint32_t VmaCountBitsSet(uint32_t v)
    3526 {
    3527  uint32_t c = v - ((v >> 1) & 0x55555555);
    3528  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    3529  c = ((c >> 4) + c) & 0x0F0F0F0F;
    3530  c = ((c >> 8) + c) & 0x00FF00FF;
    3531  c = ((c >> 16) + c) & 0x0000FFFF;
    3532  return c;
    3533 }
    3534 
    3535 // Aligns given value up to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 16.
    3536 // Use types like uint32_t, uint64_t as T.
    3537 template <typename T>
    3538 static inline T VmaAlignUp(T val, T align)
    3539 {
    3540  return (val + align - 1) / align * align;
    3541 }
    3542 // Aligns given value down to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 8.
    3543 // Use types like uint32_t, uint64_t as T.
    3544 template <typename T>
    3545 static inline T VmaAlignDown(T val, T align)
    3546 {
    3547  return val / align * align;
    3548 }
    3549 
    3550 // Division with mathematical rounding to nearest number.
    3551 template <typename T>
    3552 static inline T VmaRoundDiv(T x, T y)
    3553 {
    3554  return (x + (y / (T)2)) / y;
    3555 }
    3556 
    3557 /*
    3558 Returns true if given number is a power of two.
    3559 T must be unsigned integer number or signed integer but always nonnegative.
    3560 For 0 returns true.
    3561 */
    3562 template <typename T>
    3563 inline bool VmaIsPow2(T x)
    3564 {
    3565  return (x & (x-1)) == 0;
    3566 }
    3567 
    3568 // Returns smallest power of 2 greater or equal to v.
    3569 static inline uint32_t VmaNextPow2(uint32_t v)
    3570 {
    3571  v--;
    3572  v |= v >> 1;
    3573  v |= v >> 2;
    3574  v |= v >> 4;
    3575  v |= v >> 8;
    3576  v |= v >> 16;
    3577  v++;
    3578  return v;
    3579 }
    3580 static inline uint64_t VmaNextPow2(uint64_t v)
    3581 {
    3582  v--;
    3583  v |= v >> 1;
    3584  v |= v >> 2;
    3585  v |= v >> 4;
    3586  v |= v >> 8;
    3587  v |= v >> 16;
    3588  v |= v >> 32;
    3589  v++;
    3590  return v;
    3591 }
    3592 
    3593 // Returns largest power of 2 less or equal to v.
    3594 static inline uint32_t VmaPrevPow2(uint32_t v)
    3595 {
    3596  v |= v >> 1;
    3597  v |= v >> 2;
    3598  v |= v >> 4;
    3599  v |= v >> 8;
    3600  v |= v >> 16;
    3601  v = v ^ (v >> 1);
    3602  return v;
    3603 }
    3604 static inline uint64_t VmaPrevPow2(uint64_t v)
    3605 {
    3606  v |= v >> 1;
    3607  v |= v >> 2;
    3608  v |= v >> 4;
    3609  v |= v >> 8;
    3610  v |= v >> 16;
    3611  v |= v >> 32;
    3612  v = v ^ (v >> 1);
    3613  return v;
    3614 }
    3615 
    3616 static inline bool VmaStrIsEmpty(const char* pStr)
    3617 {
    3618  return pStr == VMA_NULL || *pStr == '\0';
    3619 }
    3620 
    3621 #if VMA_STATS_STRING_ENABLED
    3622 
    3623 static const char* VmaAlgorithmToStr(uint32_t algorithm)
    3624 {
    3625  switch(algorithm)
    3626  {
    3628  return "Linear";
    3630  return "Buddy";
    3631  case 0:
    3632  return "Default";
    3633  default:
    3634  VMA_ASSERT(0);
    3635  return "";
    3636  }
    3637 }
    3638 
    3639 #endif // #if VMA_STATS_STRING_ENABLED
    3640 
    3641 #ifndef VMA_SORT
    3642 
// Lomuto-style partition used by VmaQuickSort below.
// Uses the last element (end - 1) as the pivot and returns an iterator to the
// pivot's final position; all elements left of it satisfy cmp(elem, pivot).
template<typename Iterator, typename Compare>
Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
{
    Iterator centerValue = end; --centerValue; // Pivot = last element.
    Iterator insertIndex = beg;                // Next slot for an element less than the pivot.
    for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    {
        if(cmp(*memTypeIndex, *centerValue))
        {
            if(insertIndex != memTypeIndex)
            {
                VMA_SWAP(*memTypeIndex, *insertIndex);
            }
            ++insertIndex;
        }
    }
    // Move the pivot into its final sorted position.
    if(insertIndex != centerValue)
    {
        VMA_SWAP(*insertIndex, *centerValue);
    }
    return insertIndex;
}
    3665 
    3666 template<typename Iterator, typename Compare>
    3667 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
    3668 {
    3669  if(beg < end)
    3670  {
    3671  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
    3672  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
    3673  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    3674  }
    3675 }
    3676 
    3677 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
    3678 
    3679 #endif // #ifndef VMA_SORT
    3680 
    3681 /*
    3682 Returns true if two memory blocks occupy overlapping pages.
    3683 ResourceA must be in less memory offset than ResourceB.
    3684 
    3685 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
    3686 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
    3687 */
    3688 static inline bool VmaBlocksOnSamePage(
    3689  VkDeviceSize resourceAOffset,
    3690  VkDeviceSize resourceASize,
    3691  VkDeviceSize resourceBOffset,
    3692  VkDeviceSize pageSize)
    3693 {
    3694  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    3695  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    3696  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    3697  VkDeviceSize resourceBStart = resourceBOffset;
    3698  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    3699  return resourceAEndPage == resourceBStartPage;
    3700 }
    3701 
// Type of content stored in a suballocation of a memory block. Used to decide
// whether two neighboring allocations must respect bufferImageGranularity
// (see VmaIsBufferImageGranularityConflict below).
enum VmaSuballocationType
{
    // Unused region of the block.
    VMA_SUBALLOCATION_TYPE_FREE = 0,
    // Allocation made without knowing the resource type - treated conservatively.
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    // Memory bound to a buffer.
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    // Memory bound to an image of unknown tiling.
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    // Memory bound to an image with linear tiling.
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    // Memory bound to an image with optimal tiling.
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};
    3712 
    3713 /*
    3714 Returns true if given suballocation types could conflict and must respect
    3715 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
    3716 or linear image and another one is optimal image. If type is unknown, behave
    3717 conservatively.
    3718 */
    3719 static inline bool VmaIsBufferImageGranularityConflict(
    3720  VmaSuballocationType suballocType1,
    3721  VmaSuballocationType suballocType2)
    3722 {
    3723  if(suballocType1 > suballocType2)
    3724  {
    3725  VMA_SWAP(suballocType1, suballocType2);
    3726  }
    3727 
    3728  switch(suballocType1)
    3729  {
    3730  case VMA_SUBALLOCATION_TYPE_FREE:
    3731  return false;
    3732  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
    3733  return true;
    3734  case VMA_SUBALLOCATION_TYPE_BUFFER:
    3735  return
    3736  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    3737  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3738  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
    3739  return
    3740  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    3741  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
    3742  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3743  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
    3744  return
    3745  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3746  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
    3747  return false;
    3748  default:
    3749  VMA_ASSERT(0);
    3750  return true;
    3751  }
    3752 }
    3753 
    3754 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
    3755 {
    3756 #if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
    3757  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
    3758  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    3759  for(size_t i = 0; i < numberCount; ++i, ++pDst)
    3760  {
    3761  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
    3762  }
    3763 #else
    3764  // no-op
    3765 #endif
    3766 }
    3767 
    3768 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
    3769 {
    3770 #if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
    3771  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
    3772  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    3773  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
    3774  {
    3775  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
    3776  {
    3777  return false;
    3778  }
    3779  }
    3780 #endif
    3781  return true;
    3782 }
    3783 
    3784 /*
    3785 Fills structure with parameters of an example buffer to be used for transfers
    3786 during GPU memory defragmentation.
    3787 */
    3788 static void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo& outBufCreateInfo)
    3789 {
    3790  memset(&outBufCreateInfo, 0, sizeof(outBufCreateInfo));
    3791  outBufCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    3792  outBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
    3793  outBufCreateInfo.size = (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE; // Example size.
    3794 }
    3795 
    3796 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
    3797 struct VmaMutexLock
    3798 {
    3799  VMA_CLASS_NO_COPY(VmaMutexLock)
    3800 public:
    3801  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) :
    3802  m_pMutex(useMutex ? &mutex : VMA_NULL)
    3803  { if(m_pMutex) { m_pMutex->Lock(); } }
    3804  ~VmaMutexLock()
    3805  { if(m_pMutex) { m_pMutex->Unlock(); } }
    3806 private:
    3807  VMA_MUTEX* m_pMutex;
    3808 };
    3809 
    3810 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
    3811 struct VmaMutexLockRead
    3812 {
    3813  VMA_CLASS_NO_COPY(VmaMutexLockRead)
    3814 public:
    3815  VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
    3816  m_pMutex(useMutex ? &mutex : VMA_NULL)
    3817  { if(m_pMutex) { m_pMutex->LockRead(); } }
    3818  ~VmaMutexLockRead() { if(m_pMutex) { m_pMutex->UnlockRead(); } }
    3819 private:
    3820  VMA_RW_MUTEX* m_pMutex;
    3821 };
    3822 
    3823 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
    3824 struct VmaMutexLockWrite
    3825 {
    3826  VMA_CLASS_NO_COPY(VmaMutexLockWrite)
    3827 public:
    3828  VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) :
    3829  m_pMutex(useMutex ? &mutex : VMA_NULL)
    3830  { if(m_pMutex) { m_pMutex->LockWrite(); } }
    3831  ~VmaMutexLockWrite() { if(m_pMutex) { m_pMutex->UnlockWrite(); } }
    3832 private:
    3833  VMA_RW_MUTEX* m_pMutex;
    3834 };
    3835 
    3836 #if VMA_DEBUG_GLOBAL_MUTEX
    3837  static VMA_MUTEX gDebugGlobalMutex;
    3838  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
    3839 #else
    3840  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
    3841 #endif
    3842 
    3843 // Minimum size of a free suballocation to register it in the free suballocation collection.
    3844 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
    3845 
    3846 /*
    3847 Performs binary search and returns iterator to first element that is greater or
    3848 equal to (key), according to comparison (cmp).
    3849 
    3850 Cmp should return true if first argument is less than second argument.
    3851 
    3852 Returned value is the found element, if present in the collection or place where
    3853 new element with value (key) should be inserted.
    3854 */
    3855 template <typename CmpLess, typename IterT, typename KeyT>
    3856 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, const CmpLess& cmp)
    3857 {
    3858  size_t down = 0, up = (end - beg);
    3859  while(down < up)
    3860  {
    3861  const size_t mid = (down + up) / 2;
    3862  if(cmp(*(beg+mid), key))
    3863  {
    3864  down = mid + 1;
    3865  }
    3866  else
    3867  {
    3868  up = mid;
    3869  }
    3870  }
    3871  return beg + down;
    3872 }
    3873 
    3874 template<typename CmpLess, typename IterT, typename KeyT>
    3875 IterT VmaBinaryFindSorted(const IterT& beg, const IterT& end, const KeyT& value, const CmpLess& cmp)
    3876 {
    3877  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
    3878  beg, end, value, cmp);
    3879  if(it == end ||
    3880  (!cmp(*it, value) && !cmp(value, *it)))
    3881  {
    3882  return it;
    3883  }
    3884  return end;
    3885 }
    3886 
    3887 /*
    3888 Returns true if all pointers in the array are not-null and unique.
    3889 Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
    3890 T must be pointer type, e.g. VmaAllocation, VmaPool.
    3891 */
    3892 template<typename T>
    3893 static bool VmaValidatePointerArray(uint32_t count, const T* arr)
    3894 {
    3895  for(uint32_t i = 0; i < count; ++i)
    3896  {
    3897  const T iPtr = arr[i];
    3898  if(iPtr == VMA_NULL)
    3899  {
    3900  return false;
    3901  }
    3902  for(uint32_t j = i + 1; j < count; ++j)
    3903  {
    3904  if(iPtr == arr[j])
    3905  {
    3906  return false;
    3907  }
    3908  }
    3909  }
    3910  return true;
    3911 }
    3912 
    3914 // Memory allocation
    3915 
    3916 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
    3917 {
    3918  if((pAllocationCallbacks != VMA_NULL) &&
    3919  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    3920  {
    3921  return (*pAllocationCallbacks->pfnAllocation)(
    3922  pAllocationCallbacks->pUserData,
    3923  size,
    3924  alignment,
    3925  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    3926  }
    3927  else
    3928  {
    3929  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    3930  }
    3931 }
    3932 
    3933 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
    3934 {
    3935  if((pAllocationCallbacks != VMA_NULL) &&
    3936  (pAllocationCallbacks->pfnFree != VMA_NULL))
    3937  {
    3938  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    3939  }
    3940  else
    3941  {
    3942  VMA_SYSTEM_FREE(ptr);
    3943  }
    3944 }
    3945 
    3946 template<typename T>
    3947 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
    3948 {
    3949  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
    3950 }
    3951 
    3952 template<typename T>
    3953 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
    3954 {
    3955  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
    3956 }
    3957 
    3958 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
    3959 
    3960 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
    3961 
    3962 template<typename T>
    3963 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
    3964 {
    3965  ptr->~T();
    3966  VmaFree(pAllocationCallbacks, ptr);
    3967 }
    3968 
    3969 template<typename T>
    3970 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
    3971 {
    3972  if(ptr != VMA_NULL)
    3973  {
    3974  for(size_t i = count; i--; )
    3975  {
    3976  ptr[i].~T();
    3977  }
    3978  VmaFree(pAllocationCallbacks, ptr);
    3979  }
    3980 }
    3981 
    3982 // STL-compatible allocator.
    3983 template<typename T>
    3984 class VmaStlAllocator
    3985 {
    3986 public:
    3987  const VkAllocationCallbacks* const m_pCallbacks;
    3988  typedef T value_type;
    3989 
    3990  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    3991  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
    3992 
    3993  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    3994  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
    3995 
    3996  template<typename U>
    3997  bool operator==(const VmaStlAllocator<U>& rhs) const
    3998  {
    3999  return m_pCallbacks == rhs.m_pCallbacks;
    4000  }
    4001  template<typename U>
    4002  bool operator!=(const VmaStlAllocator<U>& rhs) const
    4003  {
    4004  return m_pCallbacks != rhs.m_pCallbacks;
    4005  }
    4006 
    4007  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
    4008 };
    4009 
    4010 #if VMA_USE_STL_VECTOR
    4011 
    4012 #define VmaVector std::vector
    4013 
    4014 template<typename T, typename allocatorT>
    4015 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
    4016 {
    4017  vec.insert(vec.begin() + index, item);
    4018 }
    4019 
    4020 template<typename T, typename allocatorT>
    4021 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
    4022 {
    4023  vec.erase(vec.begin() + index);
    4024 }
    4025 
    4026 #else // #if VMA_USE_STL_VECTOR
    4027 
    4028 /* Class with interface compatible with subset of std::vector.
    4029 T must be POD because constructors and destructors are not called and memcpy is
    4030 used for these objects. */
    4031 template<typename T, typename AllocatorT>
    4032 class VmaVector
    4033 {
    4034 public:
    4035  typedef T value_type;
    4036 
    // Constructs an empty vector that uses the given allocator for all storage.
    VmaVector(const AllocatorT& allocator) :
        m_Allocator(allocator),
        m_pArray(VMA_NULL),
        m_Count(0),
        m_Capacity(0)
    {
    }
    4044 
    // Constructs a vector of count elements. Elements are left uninitialized
    // (T must be POD - see the class comment).
    VmaVector(size_t count, const AllocatorT& allocator) :
        m_Allocator(allocator),
        m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
        m_Count(count),
        m_Capacity(count)
    {
    }
    4052 
    // Copy constructor: allocates exactly src.m_Count elements and copies them
    // with memcpy (T must be POD).
    VmaVector(const VmaVector<T, AllocatorT>& src) :
        m_Allocator(src.m_Allocator),
        m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
        m_Count(src.m_Count),
        m_Capacity(src.m_Count)
    {
        if(m_Count != 0)
        {
            memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
        }
    }
    4064 
    // Frees the storage. Element destructors are not called (T must be POD).
    ~VmaVector()
    {
        VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    }
    4069 
    // Copy assignment: resizes to rhs's count, then copies elements with memcpy
    // (T must be POD). Self-assignment safe.
    // NOTE(review): m_Allocator is intentionally left unchanged - the storage
    // stays with this vector's own allocator.
    VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    {
        if(&rhs != this)
        {
            resize(rhs.m_Count);
            if(m_Count != 0)
            {
                memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
            }
        }
        return *this;
    }
    4082 
    bool empty() const { return m_Count == 0; }
    size_t size() const { return m_Count; }
    T* data() { return m_pArray; }
    const T* data() const { return m_pArray; }

    // Unchecked element access; bounds verified only by VMA_HEAVY_ASSERT.
    T& operator[](size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }
    const T& operator[](size_t index) const
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }

    // front()/back() require a non-empty vector.
    T& front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    const T& front() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    T& back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }
    const T& back() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }
    4119 
    4120  void reserve(size_t newCapacity, bool freeMemory = false)
    4121  {
    4122  newCapacity = VMA_MAX(newCapacity, m_Count);
    4123 
    4124  if((newCapacity < m_Capacity) && !freeMemory)
    4125  {
    4126  newCapacity = m_Capacity;
    4127  }
    4128 
    4129  if(newCapacity != m_Capacity)
    4130  {
    4131  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
    4132  if(m_Count != 0)
    4133  {
    4134  memcpy(newArray, m_pArray, m_Count * sizeof(T));
    4135  }
    4136  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    4137  m_Capacity = newCapacity;
    4138  m_pArray = newArray;
    4139  }
    4140  }
    4141 
    // Resizes to newCount elements. Growth uses a 1.5x policy with a minimum
    // capacity of 8; capacity shrinks only when freeMemory is true.
    // New elements are left uninitialized (T must be POD).
    void resize(size_t newCount, bool freeMemory = false)
    {
        size_t newCapacity = m_Capacity;
        if(newCount > m_Capacity)
        {
            // Grow by at least 50%, but never below 8 or the requested count.
            newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
        }
        else if(freeMemory)
        {
            newCapacity = newCount;
        }

        if(newCapacity != m_Capacity)
        {
            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
            const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
            if(elementsToCopy != 0)
            {
                memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
            }
            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
            m_Capacity = newCapacity;
            m_pArray = newArray;
        }

        m_Count = newCount;
    }
    4169 
    // Removes all elements; releases the backing array only when
    // freeMemory is true.
    void clear(bool freeMemory = false)
    {
        resize(0, freeMemory);
    }
    4174 
    // Inserts a copy of src at the given index (index == m_Count appends).
    // Shifts the tail with memmove, so T must be trivially copyable.
    void insert(size_t index, const T& src)
    {
        VMA_HEAVY_ASSERT(index <= m_Count);
        const size_t oldCount = size();
        resize(oldCount + 1);
        if(index < oldCount)
        {
            memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
        }
        m_pArray[index] = src;
    }
    4186 
    // Removes the element at index, shifting the tail down with memmove.
    void remove(size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        const size_t oldCount = size();
        if(index < oldCount - 1)
        {
            memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
        }
        resize(oldCount - 1);
    }
    4197 
    4198  void push_back(const T& src)
    4199  {
    4200  const size_t newIndex = size();
    4201  resize(newIndex + 1);
    4202  m_pArray[newIndex] = src;
    4203  }
    4204 
    4205  void pop_back()
    4206  {
    4207  VMA_HEAVY_ASSERT(m_Count > 0);
    4208  resize(size() - 1);
    4209  }
    4210 
    // Front insertion/removal - O(n), implemented via insert/remove at index 0.
    void push_front(const T& src)
    {
        insert(0, src);
    }

    void pop_front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        remove(0);
    }
    4221 
    // Raw-pointer iterators; invalidated by any reallocation.
    typedef T* iterator;

    iterator begin() { return m_pArray; }
    iterator end() { return m_pArray + m_Count; }

private:
    AllocatorT m_Allocator; // Provides VkAllocationCallbacks via m_pCallbacks.
    T* m_pArray;            // Heap storage; VMA_NULL when capacity is 0.
    size_t m_Count;         // Number of live elements.
    size_t m_Capacity;      // Allocated element capacity (>= m_Count).
};
    4233 
// Free-function adapter so the same call works for VmaVector and std::vector
// (see the VMA_USE_STL_VECTOR configuration).
template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}
    4239 
// Free-function adapter for element removal, matching VmaVectorInsert.
template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}
    4245 
    4246 #endif // #if VMA_USE_STL_VECTOR
    4247 
// Inserts value into a vector kept sorted by CmpLess, using binary search to
// find the first position whose element is not less than value.
// Returns the index at which the value was inserted.
template<typename CmpLess, typename VectorT>
size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        vector.data(),
        vector.data() + vector.size(),
        value,
        CmpLess()) - vector.data();
    VmaVectorInsert(vector, indexToInsert, value);
    return indexToInsert;
}
    4259 
// Removes the first element equal to value from a CmpLess-sorted vector.
// Equality is derived from the comparator: !(a<b) && !(b<a).
// Returns true if an element was found and removed.
template<typename CmpLess, typename VectorT>
bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
        vector.begin(),
        vector.end(),
        value,
        comparator);
    if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    {
        size_t indexToRemove = it - vector.begin();
        VmaVectorRemove(vector, indexToRemove);
        return true;
    }
    return false;
}
    4277 
    4279 // class VmaPoolAllocator
    4280 
    4281 /*
    4282 Allocator for objects of type T using a list of arrays (pools) to speed up
    4283 allocation. Number of elements that can be allocated is not bounded because
    4284 allocator can create multiple blocks.
    4285 */
template<typename T>
class VmaPoolAllocator
{
    VMA_CLASS_NO_COPY(VmaPoolAllocator)
public:
    // firstBlockCapacity: item count of the first block; later blocks grow 1.5x.
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity);
    ~VmaPoolAllocator();
    void Clear();          // Frees all blocks; outstanding pointers become invalid.
    T* Alloc();            // Returns storage for one T (not constructed).
    void Free(T* ptr);     // Returns a slot obtained from Alloc() to its block.

private:
    // Each slot is either a live T or a link in the per-block free list.
    union Item
    {
        uint32_t NextFreeIndex; // Index of next free slot, or UINT32_MAX for end.
        T Value;
    };

    struct ItemBlock
    {
        Item* pItems;           // Array of Capacity items.
        uint32_t Capacity;
        uint32_t FirstFreeIndex; // Head of this block's free list; UINT32_MAX if full.
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    const uint32_t m_FirstBlockCapacity;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};
    4317 
template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_FirstBlockCapacity(firstBlockCapacity),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    // Capacity must be > 1 so CreateNewBlock's free-list setup loop is valid.
    VMA_ASSERT(m_FirstBlockCapacity > 1);
}
    4326 
template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    // Frees all blocks; any T still allocated becomes dangling.
    Clear();
}
    4332 
template<typename T>
void VmaPoolAllocator<T>::Clear()
{
    // Free the item arrays of every block, then drop the block list itself.
    for(size_t i = m_ItemBlocks.size(); i--; )
        vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity);
    m_ItemBlocks.clear();
}
    4340 
// Returns raw storage for one T. Searches existing blocks (newest first)
// for a free slot; allocates a new block only when all are full.
// The returned object is NOT constructed - caller is responsible.
template<typename T>
T* VmaPoolAllocator<T>::Alloc()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];
        // This block has some free items: Use first one.
        if(block.FirstFreeIndex != UINT32_MAX)
        {
            Item* const pItem = &block.pItems[block.FirstFreeIndex];
            block.FirstFreeIndex = pItem->NextFreeIndex; // Pop from free list.
            return &pItem->Value;
        }
    }

    // No block has free item: Create new one and use it.
    ItemBlock& newBlock = CreateNewBlock();
    Item* const pItem = &newBlock.pItems[0];
    newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    return &pItem->Value;
}
    4362 
// Returns a slot previously obtained from Alloc() to its owning block's
// free list. Asserts if ptr was not allocated from this pool.
// The object is NOT destroyed - caller is responsible.
template<typename T>
void VmaPoolAllocator<T>::Free(T* ptr)
{
    // Search all memory blocks to find ptr.
    for(size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];

        // Casting to union. memcpy avoids a strict-aliasing violation that a
        // reinterpret_cast from T* to Item* could incur.
        Item* pItemPtr;
        memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));

        // Check if pItemPtr is in address range of this block.
        if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity))
        {
            const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
            // Push the slot onto the block's free list.
            pItemPtr->NextFreeIndex = block.FirstFreeIndex;
            block.FirstFreeIndex = index;
            return;
        }
    }
    VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
}
    4386 
// Appends a new block. The first block has m_FirstBlockCapacity items;
// each subsequent block is 1.5x the size of the previous one.
template<typename T>
typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
{
    const uint32_t newBlockCapacity = m_ItemBlocks.empty() ?
        m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2;

    const ItemBlock newBlock = {
        vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity),
        newBlockCapacity,
        0 };

    m_ItemBlocks.push_back(newBlock);

    // Setup singly-linked list of all free items in this block.
    // (Capacity is asserted > 1 in the constructor, so the loop is safe.)
    for(uint32_t i = 0; i < newBlockCapacity - 1; ++i)
        newBlock.pItems[i].NextFreeIndex = i + 1;
    newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX;
    return m_ItemBlocks.back();
}
    4406 
    4408 // class VmaRawList, VmaList
    4409 
    4410 #if VMA_USE_STL_LIST
    4411 
    4412 #define VmaList std::list
    4413 
    4414 #else // #if VMA_USE_STL_LIST
    4415 
// Node of the doubly linked list VmaRawList. Allocated from VmaPoolAllocator.
template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev; // VMA_NULL for the front node.
    VmaListItem* pNext; // VMA_NULL for the back node.
    T Value;
};
    4423 
    4424 // Doubly linked list.
    4425 template<typename T>
    4426 class VmaRawList
    4427 {
    4428  VMA_CLASS_NO_COPY(VmaRawList)
    4429 public:
    4430  typedef VmaListItem<T> ItemType;
    4431 
    4432  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    4433  ~VmaRawList();
    4434  void Clear();
    4435 
    4436  size_t GetCount() const { return m_Count; }
    4437  bool IsEmpty() const { return m_Count == 0; }
    4438 
    4439  ItemType* Front() { return m_pFront; }
    4440  const ItemType* Front() const { return m_pFront; }
    4441  ItemType* Back() { return m_pBack; }
    4442  const ItemType* Back() const { return m_pBack; }
    4443 
    4444  ItemType* PushBack();
    4445  ItemType* PushFront();
    4446  ItemType* PushBack(const T& value);
    4447  ItemType* PushFront(const T& value);
    4448  void PopBack();
    4449  void PopFront();
    4450 
    4451  // Item can be null - it means PushBack.
    4452  ItemType* InsertBefore(ItemType* pItem);
    4453  // Item can be null - it means PushFront.
    4454  ItemType* InsertAfter(ItemType* pItem);
    4455 
    4456  ItemType* InsertBefore(ItemType* pItem, const T& value);
    4457  ItemType* InsertAfter(ItemType* pItem, const T& value);
    4458 
    4459  void Remove(ItemType* pItem);
    4460 
    4461 private:
    4462  const VkAllocationCallbacks* const m_pAllocationCallbacks;
    4463  VmaPoolAllocator<ItemType> m_ItemAllocator;
    4464  ItemType* m_pFront;
    4465  ItemType* m_pBack;
    4466  size_t m_Count;
    4467 };
    4468 
template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128), // 128 = first pool block capacity.
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}
    4478 
template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would be unnecessary
    // computations to return all items to m_ItemAllocator as free.
    // The pool allocator's destructor releases all node memory at once.
}
    4485 
    4486 template<typename T>
    4487 void VmaRawList<T>::Clear()
    4488 {
    4489  if(IsEmpty() == false)
    4490  {
    4491  ItemType* pItem = m_pBack;
    4492  while(pItem != VMA_NULL)
    4493  {
    4494  ItemType* const pPrevItem = pItem->pPrev;
    4495  m_ItemAllocator.Free(pItem);
    4496  pItem = pPrevItem;
    4497  }
    4498  m_pFront = VMA_NULL;
    4499  m_pBack = VMA_NULL;
    4500  m_Count = 0;
    4501  }
    4502 }
    4503 
// Appends a new node (Value left uninitialized) and returns it.
template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pNext = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pPrev = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pPrev = m_pBack;
        m_pBack->pNext = pNewItem;
        m_pBack = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}
    4525 
// Prepends a new node (Value left uninitialized) and returns it.
template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pPrev = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pNext = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pNext = m_pFront;
        m_pFront->pPrev = pNewItem;
        m_pFront = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}
    4547 
// Value-taking overloads: allocate the node, then copy-assign the value.
template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
{
    ItemType* const pNewItem = PushBack();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
{
    ItemType* const pNewItem = PushFront();
    pNewItem->Value = value;
    return pNewItem;
}
    4563 
// Removes the last node. Undefined on an empty list (debug-asserted).
template<typename T>
void VmaRawList<T>::PopBack()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pBackItem = m_pBack;
    ItemType* const pPrevItem = pBackItem->pPrev;
    if(pPrevItem != VMA_NULL)
    {
        pPrevItem->pNext = VMA_NULL;
    }
    m_pBack = pPrevItem; // Becomes VMA_NULL when the list is now empty.
    m_ItemAllocator.Free(pBackItem);
    --m_Count;
}

// Removes the first node. Undefined on an empty list (debug-asserted).
template<typename T>
void VmaRawList<T>::PopFront()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pFrontItem = m_pFront;
    ItemType* const pNextItem = pFrontItem->pNext;
    if(pNextItem != VMA_NULL)
    {
        pNextItem->pPrev = VMA_NULL;
    }
    m_pFront = pNextItem; // Becomes VMA_NULL when the list is now empty.
    m_ItemAllocator.Free(pFrontItem);
    --m_Count;
}
    4593 
// Unlinks pItem from the list and returns its node to the pool.
// pItem must be a live node of this list.
template<typename T>
void VmaRawList<T>::Remove(ItemType* pItem)
{
    VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    VMA_HEAVY_ASSERT(m_Count > 0);

    // Fix up predecessor link, or the front pointer if pItem was first.
    if(pItem->pPrev != VMA_NULL)
    {
        pItem->pPrev->pNext = pItem->pNext;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pFront == pItem);
        m_pFront = pItem->pNext;
    }

    // Fix up successor link, or the back pointer if pItem was last.
    if(pItem->pNext != VMA_NULL)
    {
        pItem->pNext->pPrev = pItem->pPrev;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pBack == pItem);
        m_pBack = pItem->pPrev;
    }

    m_ItemAllocator.Free(pItem);
    --m_Count;
}
    4623 
// Inserts a new node (Value uninitialized) before pItem.
// Passing VMA_NULL means "before end()", i.e. PushBack.
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const prevItem = pItem->pPrev;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pPrev = prevItem;
        newItem->pNext = pItem;
        pItem->pPrev = newItem;
        if(prevItem != VMA_NULL)
        {
            prevItem->pNext = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pFront == pItem);
            m_pFront = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushBack();
}
    4649 
// Inserts a new node (Value uninitialized) after pItem.
// Passing VMA_NULL means PushFront.
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const nextItem = pItem->pNext;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pNext = nextItem;
        newItem->pPrev = pItem;
        pItem->pNext = newItem;
        if(nextItem != VMA_NULL)
        {
            nextItem->pPrev = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pBack == pItem);
            m_pBack = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushFront();
}
    4675 
// Value-taking insert overloads: link the node, then copy-assign the value.
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertBefore(pItem);
    newItem->Value = value;
    return newItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertAfter(pItem);
    newItem->Value = value;
    return newItem;
}
    4691 
// std::list-like wrapper over VmaRawList that adds STL-style iterators.
// AllocatorT must expose m_pCallbacks (a VkAllocationCallbacks pointer).
template<typename T, typename AllocatorT>
class VmaList
{
    VMA_CLASS_NO_COPY(VmaList)
public:
    // Bidirectional iterator. end() is represented by m_pItem == VMA_NULL,
    // which is why operator-- has a dedicated branch for that state.
    class iterator
    {
    public:
        iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                // Decrementing end(): move to the last element.
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        iterator operator++(int)
        {
            iterator result = *this;
            ++*this;
            return result;
        }
        iterator operator--(int)
        {
            iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem; // VMA_NULL represents end().

        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        friend class VmaList<T, AllocatorT>;
    };

    // Const counterpart of iterator; implicitly convertible from iterator.
    class const_iterator
    {
    public:
        const_iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        const_iterator(const iterator& src) :
            m_pList(src.m_pList),
            m_pItem(src.m_pItem)
        {
        }

        const T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        const T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        const_iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        const_iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                // Decrementing cend(): move to the last element.
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        const_iterator operator++(int)
        {
            const_iterator result = *this;
            ++*this;
            return result;
        }
        const_iterator operator--(int)
        {
            const_iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem; // VMA_NULL represents cend().

        friend class VmaList<T, AllocatorT>;
    };

    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    // Inserts value before it; inserting before end() appends.
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
};
    4876 
    4877 #endif // #if VMA_USE_STL_LIST
    4878 
    4880 // class VmaMap
    4881 
    4882 // Unused in this version.
    4883 #if 0
    4884 
    4885 #if VMA_USE_STL_UNORDERED_MAP
    4886 
    4887 #define VmaPair std::pair
    4888 
    4889 #define VMA_MAP_TYPE(KeyT, ValueT) \
    4890  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
    4891 
    4892 #else // #if VMA_USE_STL_UNORDERED_MAP
    4893 
// Minimal std::pair substitute used by VmaMap (this whole section is
// currently compiled out by the enclosing #if 0).
template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};
    4903 
    4904 /* Class compatible with subset of interface of std::unordered_map.
    4905 KeyT, ValueT must be POD because they will be stored in VmaVector.
    4906 */
// Sorted-vector map. Lookup is O(log n) via binary search; insert/erase are
// O(n) due to element shifting. Compiled out by the enclosing #if 0.
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    typedef PairType* iterator;

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    void insert(const PairType& pair);
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    // Kept sorted by key (VmaPairFirstLess).
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};
    4926 
    4927 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
    4928 
// Comparator ordering pairs by their first member; the second overload
// allows comparing a pair directly against a bare key during binary search.
template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};
    4941 
// Inserts the pair at its sorted position (duplicates are not rejected).
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        pair,
        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    VmaVectorInsert(m_Vector, indexToInsert, pair);
}
    4952 
// Binary-searches for key; returns end() when not found.
template<typename KeyT, typename ValueT>
VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
{
    PairType* it = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        key,
        VmaPairFirstLess<KeyT, ValueT>());
    if((it != m_Vector.end()) && (it->first == key))
    {
        return it;
    }
    else
    {
        return m_Vector.end();
    }
}
    4970 
// Erases the element the iterator points to; it must be a valid iterator
// into this map (dereferenceable, not end()).
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}
    4976 
    4977 #endif // #if VMA_USE_STL_UNORDERED_MAP
    4978 
    4979 #endif // #if 0
    4980 
    4982 
    4983 class VmaDeviceMemoryBlock;
    4984 
    4985 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
    4986 
    4987 struct VmaAllocation_T
    4988 {
    4989 private:
    4990  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
    4991 
    4992  enum FLAGS
    4993  {
    4994  FLAG_USER_DATA_STRING = 0x01,
    4995  };
    4996 
    4997 public:
    4998  enum ALLOCATION_TYPE
    4999  {
    5000  ALLOCATION_TYPE_NONE,
    5001  ALLOCATION_TYPE_BLOCK,
    5002  ALLOCATION_TYPE_DEDICATED,
    5003  };
    5004 
    5005  /*
    5006  This struct cannot have constructor or destructor. It must be POD because it is
    5007  allocated using VmaPoolAllocator.
    5008  */
    5009 
    // In-place initializer used instead of a constructor, because this struct
    // must stay POD (it is allocated from VmaPoolAllocator).
    void Ctor(uint32_t currentFrameIndex, bool userDataString)
    {
        m_Alignment = 1;
        m_Size = 0;
        m_pUserData = VMA_NULL;
        m_LastUseFrameIndex = currentFrameIndex;
        m_Type = (uint8_t)ALLOCATION_TYPE_NONE;
        m_SuballocationType = (uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN;
        m_MapCount = 0;
        m_Flags = userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0;

#if VMA_STATS_STRING_ENABLED
        m_CreationFrameIndex = currentFrameIndex;
        m_BufferImageUsage = 0;
#endif
    }
    5026 
    // In-place finalizer (POD counterpart of a destructor): verifies the
    // allocation was unmapped and its user data released before destruction.
    void Dtor()
    {
        VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");

        // Check if owned string was freed.
        VMA_ASSERT(m_pUserData == VMA_NULL);
    }
    5034 
    // Initializes this object as a suballocation of a VmaDeviceMemoryBlock.
    // Must be called on a freshly Ctor-ed object (type NONE is asserted);
    // activates the m_BlockAllocation member of the union.
    void InitBlockAllocation(
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset,
        VkDeviceSize alignment,
        VkDeviceSize size,
        VmaSuballocationType suballocationType,
        bool mapped,
        bool canBecomeLost)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(block != VMA_NULL);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_Alignment = alignment;
        m_Size = size;
        m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_SuballocationType = (uint8_t)suballocationType;
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
        m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    }
    5055 
    // Initializes this object as an already-lost block allocation
    // (null block, frame index must already equal VMA_FRAME_INDEX_LOST).
    void InitLost()
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_BlockAllocation.m_Block = VMA_NULL;
        m_BlockAllocation.m_Offset = 0;
        m_BlockAllocation.m_CanBecomeLost = true;
    }
    5065 
    5066  void ChangeBlockAllocation(
    5067  VmaAllocator hAllocator,
    5068  VmaDeviceMemoryBlock* block,
    5069  VkDeviceSize offset);
    5070 
    5071  void ChangeSize(VkDeviceSize newSize);
    5072  void ChangeOffset(VkDeviceSize newOffset);
    5073 
    // Initializes this object as a dedicated allocation owning its own
    // VkDeviceMemory; activates the m_DedicatedAllocation union member.
    // pMappedData not null means allocation is created with MAPPED flag.
    void InitDedicatedAllocation(
        uint32_t memoryTypeIndex,
        VkDeviceMemory hMemory,
        VmaSuballocationType suballocationType,
        void* pMappedData,
        VkDeviceSize size)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(hMemory != VK_NULL_HANDLE);
        m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
        m_Alignment = 0;
        m_Size = size;
        m_SuballocationType = (uint8_t)suballocationType;
        m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
        m_DedicatedAllocation.m_hMemory = hMemory;
        m_DedicatedAllocation.m_pMappedData = pMappedData;
    }
    5093 
    // --- Simple accessors ---
    ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    VkDeviceSize GetAlignment() const { return m_Alignment; }
    VkDeviceSize GetSize() const { return m_Size; }
    bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
    void* GetUserData() const { return m_pUserData; }
    void SetUserData(VmaAllocator hAllocator, void* pUserData);
    VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }

    // Valid only for block allocations (asserted) - reads the union's
    // m_BlockAllocation member.
    VmaDeviceMemoryBlock* GetBlock() const
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        return m_BlockAllocation.m_Block;
    }
    VkDeviceSize GetOffset() const;
    VkDeviceMemory GetMemory() const;
    uint32_t GetMemoryTypeIndex() const;
    bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
    void* GetMappedData() const;
    bool CanBecomeLost() const;

    // Atomic access to the last-use frame index, used by the
    // lost-allocation mechanism.
    uint32_t GetLastUseFrameIndex() const
    {
        return m_LastUseFrameIndex.load();
    }
    bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    {
        return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    }
    5122  /*
    5123  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
    5124  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    5125  - Else, returns false.
    5126 
    5127  If hAllocation is already lost, assert - you should not call it then.
    5128  If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    5129  */
    5130  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
    5131 
    // Fills a VmaStatInfo describing this single dedicated allocation
    // (one block, one allocation, zero unused ranges).
    void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
        outInfo.blockCount = 1;
        outInfo.allocationCount = 1;
        outInfo.unusedRangeCount = 0;
        outInfo.usedBytes = m_Size;
        outInfo.unusedBytes = 0;
        outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
        // Min starts at UINT64_MAX / max at 0 so later merging takes min/max correctly.
        outInfo.unusedRangeSizeMin = UINT64_MAX;
        outInfo.unusedRangeSizeMax = 0;
    }
    5144 
    // Map/unmap entry points split by allocation kind; block allocations defer to the
    // owning VmaDeviceMemoryBlock, dedicated allocations manage their own VkDeviceMemory.
    void BlockAllocMap();
    void BlockAllocUnmap();
    VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    void DedicatedAllocUnmap(VmaAllocator hAllocator);

#if VMA_STATS_STRING_ENABLED
    uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
    uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }

    // Records the buffer/image usage flags once, for stats reporting.
    // Asserts it was not set before (0 means unknown).
    void InitBufferImageUsage(uint32_t bufferImageUsage)
    {
        VMA_ASSERT(m_BufferImageUsage == 0);
        m_BufferImageUsage = bufferImageUsage;
    }

    void PrintParameters(class VmaJsonWriter& json) const;
#endif
    5162 
private:
    VkDeviceSize m_Alignment;
    VkDeviceSize m_Size;
    void* m_pUserData;
    VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    uint8_t m_Type; // ALLOCATION_TYPE
    uint8_t m_SuballocationType; // VmaSuballocationType
    // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
    uint8_t m_MapCount;
    uint8_t m_Flags; // enum FLAGS

    // Allocation out of VmaDeviceMemoryBlock.
    struct BlockAllocation
    {
        VmaDeviceMemoryBlock* m_Block;
        VkDeviceSize m_Offset;
        bool m_CanBecomeLost;
    };

    // Allocation for an object that has its own private VkDeviceMemory.
    struct DedicatedAllocation
    {
        uint32_t m_MemoryTypeIndex;
        VkDeviceMemory m_hMemory;
        void* m_pMappedData; // Not null means memory is mapped.
    };

    // The active member is selected by m_Type (see GetBlock(), which asserts
    // ALLOCATION_TYPE_BLOCK before touching m_BlockAllocation).
    union
    {
        // Allocation out of VmaDeviceMemoryBlock.
        BlockAllocation m_BlockAllocation;
        // Allocation for an object that has its own private VkDeviceMemory.
        DedicatedAllocation m_DedicatedAllocation;
    };

#if VMA_STATS_STRING_ENABLED
    uint32_t m_CreationFrameIndex;
    uint32_t m_BufferImageUsage; // 0 if unknown.
#endif

    void FreeUserDataString(VmaAllocator hAllocator);
};
    5206 
    5207 /*
    5208 Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
    5209 allocated memory block or free.
    5210 */
struct VmaSuballocation
{
    VkDeviceSize offset; // Byte offset of this region within the block.
    VkDeviceSize size;   // Size of the region in bytes.
    VmaAllocation hAllocation; // Null when the region is free — TODO confirm against usage sites.
    VmaSuballocationType type; // VMA_SUBALLOCATION_TYPE_FREE or the allocated resource's type.
};
    5218 
    5219 // Comparator for offsets.
    5220 struct VmaSuballocationOffsetLess
    5221 {
    5222  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    5223  {
    5224  return lhs.offset < rhs.offset;
    5225  }
    5226 };
    5227 struct VmaSuballocationOffsetGreater
    5228 {
    5229  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    5230  {
    5231  return lhs.offset > rhs.offset;
    5232  }
    5233 };
    5234 
    5235 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
    5236 
    5237 // Cost of one additional allocation lost, as equivalent in bytes.
    5238 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
    5239 
// Discriminates how a planned allocation was placed inside a block.
enum class VmaAllocationRequestType
{
    Normal,
    // Used by "Linear" algorithm.
    UpperAddress, // Placed at the upper side of the double stack.
    EndOf1st,     // Appended after the 1st suballocation vector.
    EndOf2nd,     // Appended after the 2nd suballocation vector.
};
    5248 
    5249 /*
    5250 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
    5251 
    5252 If canMakeOtherLost was false:
    5253 - item points to a FREE suballocation.
    5254 - itemsToMakeLostCount is 0.
    5255 
    5256 If canMakeOtherLost was true:
    5257 - item points to first of sequence of suballocations, which are either FREE,
    5258  or point to VmaAllocations that can become lost.
    5259 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
    5260  the requested allocation to succeed.
    5261 */
struct VmaAllocationRequest
{
    VkDeviceSize offset;
    VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
    VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
    VmaSuballocationList::iterator item; // See struct-level comment: FREE item, or first of a make-lost sequence.
    size_t itemsToMakeLostCount;
    void* customData; // Algorithm-specific payload — semantics depend on the metadata implementation.
    VmaAllocationRequestType type;

    // Heuristic cost of fulfilling this request: bytes of live allocations sacrificed,
    // plus a fixed per-allocation penalty (VMA_LOST_ALLOCATION_COST) for each one made lost.
    // Lower is better when comparing candidate requests.
    VkDeviceSize CalcCost() const
    {
        return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    }
};
    5277 
    5278 /*
    5279 Data structure used for bookkeeping of allocations and unused ranges of memory
    5280 in a single VkDeviceMemory block.
    5281 */
// Abstract interface for bookkeeping strategies over a single VkDeviceMemory block.
// Concrete implementations: VmaBlockMetadata_Generic, _Linear, _Buddy.
class VmaBlockMetadata
{
public:
    VmaBlockMetadata(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata() { }
    // Must be called once after construction with the block's full size.
    virtual void Init(VkDeviceSize size) { m_Size = size; }

    // Validates all data structures inside this object. If not valid, returns false.
    virtual bool Validate() const = 0;
    VkDeviceSize GetSize() const { return m_Size; }
    virtual size_t GetAllocationCount() const = 0;
    virtual VkDeviceSize GetSumFreeSize() const = 0;
    virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
    // Returns true if this block is empty - contains only single free suballocation.
    virtual bool IsEmpty() const = 0;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
    // Shouldn't modify blockCount.
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
#endif

    // Tries to find a place for suballocation with given parameters inside this block.
    // If succeeded, fills pAllocationRequest and returns true.
    // If failed, returns false.
    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest) = 0;

    // Makes lost the allocations a CreateAllocationRequest marked in pAllocationRequest.
    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest) = 0;

    // Returns the number of allocations made lost.
    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;

    virtual VkResult CheckCorruption(const void* pBlockData) = 0;

    // Makes actual allocation based on request. Request must already be checked and valid.
    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        VmaAllocation hAllocation) = 0;

    // Frees suballocation assigned to given memory region.
    virtual void Free(const VmaAllocation allocation) = 0;
    virtual void FreeAtOffset(VkDeviceSize offset) = 0;

    // Tries to resize (grow or shrink) space for given allocation, in place.
    // Default: not supported.
    virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize) { return false; }

protected:
    const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }

#if VMA_STATS_STRING_ENABLED
    // Shared helpers for implementing PrintDetailedMap() in derived classes.
    void PrintDetailedMap_Begin(class VmaJsonWriter& json,
        VkDeviceSize unusedBytes,
        size_t allocationCount,
        size_t unusedRangeCount) const;
    void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VmaAllocation hAllocation) const;
    void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VkDeviceSize size) const;
    void PrintDetailedMap_End(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Size;
    const VkAllocationCallbacks* m_pAllocationCallbacks;
};
    5366 
// Helper for Validate() implementations: asserts the condition (with its text in the
// message) and makes the enclosing function return false when it does not hold.
// do/while(false) makes it statement-safe in if/else without braces.
#define VMA_VALIDATE(cond) do { if(!(cond)) { \
        VMA_ASSERT(0 && "Validation failed: " #cond); \
        return false; \
    } } while(false)
    5371 
// Default metadata implementation: suballocations kept in a list ordered by offset,
// plus an auxiliary index of large free suballocations sorted by size.
class VmaBlockMetadata_Generic : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
public:
    VmaBlockMetadata_Generic(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Generic();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    // Every list node is either an allocation or a free range, so live allocations = total - free.
    virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

    virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize);

    // For defragmentation

    bool IsBufferImageGranularityConflictPossible(
        VkDeviceSize bufferImageGranularity,
        VmaSuballocationType& inOutPrevSuballocType) const;

private:
    friend class VmaDefragmentationAlgorithm_Generic;
    friend class VmaDefragmentationAlgorithm_Fast;

    uint32_t m_FreeCount;
    VkDeviceSize m_SumFreeSize;
    VmaSuballocationList m_Suballocations;
    // Suballocations that are free and have size greater than certain threshold.
    // Sorted by size, ascending.
    VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;

    bool ValidateFreeSuballocationList() const;

    // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
    // If yes, fills pOffset and returns true. If no, returns false.
    bool CheckAllocation(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        VmaSuballocationList::const_iterator suballocItem,
        bool canMakeOtherLost,
        VkDeviceSize* pOffset,
        size_t* itemsToMakeLostCount,
        VkDeviceSize* pSumFreeSize,
        VkDeviceSize* pSumItemSize) const;
    // Given free suballocation, it merges it with following one, which must also be free.
    void MergeFreeWithNext(VmaSuballocationList::iterator item);
    // Releases given suballocation, making it free.
    // Merges it with adjacent free suballocations if applicable.
    // Returns iterator to new free suballocation at this place.
    VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    // Given free suballocation, it inserts it into sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    // Given free suballocation, it removes it from sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
};
    5473 
    5474 /*
    5475 Allocations and their references in internal data structure look like this:
    5476 
    5477 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
    5478 
    5479  0 +-------+
    5480  | |
    5481  | |
    5482  | |
    5483  +-------+
    5484  | Alloc | 1st[m_1stNullItemsBeginCount]
    5485  +-------+
    5486  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    5487  +-------+
    5488  | ... |
    5489  +-------+
    5490  | Alloc | 1st[1st.size() - 1]
    5491  +-------+
    5492  | |
    5493  | |
    5494  | |
    5495 GetSize() +-------+
    5496 
    5497 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
    5498 
    5499  0 +-------+
    5500  | Alloc | 2nd[0]
    5501  +-------+
    5502  | Alloc | 2nd[1]
    5503  +-------+
    5504  | ... |
    5505  +-------+
    5506  | Alloc | 2nd[2nd.size() - 1]
    5507  +-------+
    5508  | |
    5509  | |
    5510  | |
    5511  +-------+
    5512  | Alloc | 1st[m_1stNullItemsBeginCount]
    5513  +-------+
    5514  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    5515  +-------+
    5516  | ... |
    5517  +-------+
    5518  | Alloc | 1st[1st.size() - 1]
    5519  +-------+
    5520  | |
    5521 GetSize() +-------+
    5522 
    5523 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
    5524 
    5525  0 +-------+
    5526  | |
    5527  | |
    5528  | |
    5529  +-------+
    5530  | Alloc | 1st[m_1stNullItemsBeginCount]
    5531  +-------+
    5532  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    5533  +-------+
    5534  | ... |
    5535  +-------+
    5536  | Alloc | 1st[1st.size() - 1]
    5537  +-------+
    5538  | |
    5539  | |
    5540  | |
    5541  +-------+
    5542  | Alloc | 2nd[2nd.size() - 1]
    5543  +-------+
    5544  | ... |
    5545  +-------+
    5546  | Alloc | 2nd[1]
    5547  +-------+
    5548  | Alloc | 2nd[0]
    5549 GetSize() +-------+
    5550 
    5551 */
// Linear ("stack"/"ring buffer"/"double stack") metadata implementation.
// See the diagram comment above for the memory layout in each SECOND_VECTOR_MODE.
class VmaBlockMetadata_Linear : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
public:
    VmaBlockMetadata_Linear(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Linear();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const;
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const { return GetAllocationCount() == 0; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

private:
    /*
    There are two suballocation vectors, used in ping-pong way.
    The one with index m_1stVectorIndex is called 1st.
    The one with index (m_1stVectorIndex ^ 1) is called 2nd.
    2nd can be non-empty only when 1st is not empty.
    When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
    */
    typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;

    enum SECOND_VECTOR_MODE
    {
        SECOND_VECTOR_EMPTY,
        /*
        Suballocations in 2nd vector are created later than the ones in 1st, but they
        all have smaller offset.
        */
        SECOND_VECTOR_RING_BUFFER,
        /*
        Suballocations in 2nd vector are upper side of double stack.
        They all have offsets higher than those in 1st vector.
        Top of this stack means smaller offsets, but higher indices in this vector.
        */
        SECOND_VECTOR_DOUBLE_STACK,
    };

    VkDeviceSize m_SumFreeSize;
    SuballocationVectorType m_Suballocations0, m_Suballocations1;
    uint32_t m_1stVectorIndex;
    SECOND_VECTOR_MODE m_2ndVectorMode;

    // Accessors resolve the ping-pong indirection; always go through these
    // instead of touching m_Suballocations0/1 directly.
    SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
    const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }

    // Number of items in 1st vector with hAllocation = null at the beginning.
    size_t m_1stNullItemsBeginCount;
    // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
    size_t m_1stNullItemsMiddleCount;
    // Number of items in 2nd vector with hAllocation = null.
    size_t m_2ndNullItemsCount;

    bool ShouldCompact1st() const;
    void CleanupAfterFree();

    bool CreateAllocationRequest_LowerAddress(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);
    bool CreateAllocationRequest_UpperAddress(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);
};
    5670 
    5671 /*
    5672 - GetSize() is the original size of allocated memory block.
    5673 - m_UsableSize is this size aligned down to a power of two.
    5674  All allocations and calculations happen relative to m_UsableSize.
    5675 - GetUnusableSize() is the difference between them.
  It is reported as separate, unused range, not available for allocations.
    5677 
    5678 Node at level 0 has size = m_UsableSize.
    5679 Each next level contains nodes with size 2 times smaller than current level.
    5680 m_LevelCount is the maximum number of levels to use in the current object.
    5681 */
// Buddy-allocator metadata implementation. See the comment block above:
// works over m_UsableSize (block size aligned down to a power of two),
// with a binary tree of nodes halving in size at each level.
class VmaBlockMetadata_Buddy : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
public:
    VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Buddy();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const { return m_AllocationCount; }
    // Unusable tail (beyond the power-of-two) is counted as free for stats purposes.
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Corruption detection is not supported by this algorithm.
    virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
    virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }

private:
    static const VkDeviceSize MIN_NODE_SIZE = 32;
    static const size_t MAX_LEVELS = 30;

    // Accumulators recomputed during Validate() and compared against the cached counters.
    struct ValidationContext
    {
        size_t calculatedAllocationCount;
        size_t calculatedFreeCount;
        VkDeviceSize calculatedSumFreeSize;

        ValidationContext() :
            calculatedAllocationCount(0),
            calculatedFreeCount(0),
            calculatedSumFreeSize(0) { }
    };

    // Tree node; payload in the union depends on `type`.
    struct Node
    {
        VkDeviceSize offset;
        enum TYPE
        {
            TYPE_FREE,
            TYPE_ALLOCATION,
            TYPE_SPLIT,
            TYPE_COUNT
        } type;
        Node* parent;
        Node* buddy;

        union
        {
            struct
            {
                Node* prev;
                Node* next;
            } free; // Intrusive links into m_FreeList at this node's level.
            struct
            {
                VmaAllocation alloc;
            } allocation;
            struct
            {
                Node* leftChild; // Right child is reachable as leftChild->buddy.
            } split;
        };
    };

    // Size of the memory block aligned down to a power of two.
    VkDeviceSize m_UsableSize;
    uint32_t m_LevelCount;

    Node* m_Root;
    // Doubly-linked free list per level.
    struct {
        Node* front;
        Node* back;
    } m_FreeList[MAX_LEVELS];
    // Number of nodes in the tree with type == TYPE_ALLOCATION.
    size_t m_AllocationCount;
    // Number of nodes in the tree with type == TYPE_FREE.
    size_t m_FreeCount;
    // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
    VkDeviceSize m_SumFreeSize;

    VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
    void DeleteNode(Node* node);
    bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
    uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
    // Level L node size is m_UsableSize / 2^L.
    inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
    // Alloc passed just for validation. Can be null.
    void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
    void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
    // Adds node to the front of FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next can be undefined.
    void AddToFreeListFront(uint32_t level, Node* node);
    // Removes node from FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next stay untouched.
    void RemoveFromFreeList(uint32_t level, Node* node);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
#endif
};
    5817 
    5818 /*
    5819 Represents a single block of device memory (`VkDeviceMemory`) with all the
    5820 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
    5821 
    5822 Thread-safety: This class must be externally synchronized.
    5823 */
class VmaDeviceMemoryBlock
{
    VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
public:
    // Bookkeeping of suballocations within this block (algorithm chosen in Init()).
    VmaBlockMetadata* m_pMetadata;

    VmaDeviceMemoryBlock(VmaAllocator hAllocator);

    ~VmaDeviceMemoryBlock()
    {
        VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
        // Destroy() must have released the VkDeviceMemory before the destructor runs.
        VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    }

    // Always call after construction.
    void Init(
        VmaAllocator hAllocator,
        VmaPool hParentPool,
        uint32_t newMemoryTypeIndex,
        VkDeviceMemory newMemory,
        VkDeviceSize newSize,
        uint32_t id,
        uint32_t algorithm);
    // Always call before destruction.
    void Destroy(VmaAllocator allocator);

    VmaPool GetParentPool() const { return m_hParentPool; }
    VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    uint32_t GetId() const { return m_Id; }
    void* GetMappedData() const { return m_pMappedData; }

    // Validates all data structures inside this object. If not valid, returns false.
    bool Validate() const;

    VkResult CheckCorruption(VmaAllocator hAllocator);

    // Maps the block's memory, ref-counted by `count`. ppData can be null.
    VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
    void Unmap(VmaAllocator hAllocator, uint32_t count);

    // Margin-based corruption detection around a single allocation within this block.
    VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
    VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);

    VkResult BindBufferMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkBuffer hBuffer);
    VkResult BindImageMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkImage hImage);

private:
    VmaPool m_hParentPool; // VK_NULL_HANDLE if not belongs to custom pool.
    uint32_t m_MemoryTypeIndex;
    uint32_t m_Id;
    VkDeviceMemory m_hMemory;

    /*
    Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
    Also protects m_MapCount, m_pMappedData.
    Allocations, deallocations, any change in m_pMetadata is protected by parent's VmaBlockVector::m_Mutex.
    */
    VMA_MUTEX m_Mutex;
    uint32_t m_MapCount;
    void* m_pMappedData;
};
    5892 
    5893 struct VmaPointerLess
    5894 {
    5895  bool operator()(const void* lhs, const void* rhs) const
    5896  {
    5897  return lhs < rhs;
    5898  }
    5899 };
    5900 
// Describes a single planned relocation of allocation data between blocks
// during defragmentation: copy `size` bytes from (srcBlockIndex, srcOffset)
// to (dstBlockIndex, dstOffset).
struct VmaDefragmentationMove
{
    size_t srcBlockIndex;
    size_t dstBlockIndex;
    VkDeviceSize srcOffset;
    VkDeviceSize dstOffset;
    VkDeviceSize size;
};
    5909 
    5910 class VmaDefragmentationAlgorithm;
    5911 
    5912 /*
    5913 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
    5914 Vulkan memory type.
    5915 
    5916 Synchronized internally with a mutex.
    5917 */
// Manages a growable collection of VmaDeviceMemoryBlock objects for a single
// Vulkan memory type index. Used both as a default per-memory-type pool and as
// the storage behind a custom VmaPool_T. Public operations synchronize on the
// internal m_Mutex; the members below the "To be used only while the m_Mutex is
// locked" marker are the exception and require the caller to hold the lock.
struct VmaBlockVector
{
    VMA_CLASS_NO_COPY(VmaBlockVector)
public:
    VmaBlockVector(
        VmaAllocator hAllocator,
        VmaPool hParentPool,
        uint32_t memoryTypeIndex,
        VkDeviceSize preferredBlockSize,
        size_t minBlockCount,
        size_t maxBlockCount,
        VkDeviceSize bufferImageGranularity,
        uint32_t frameInUseCount,
        bool isCustomPool,
        bool explicitBlockSize,
        uint32_t algorithm);
    ~VmaBlockVector();

    // Creates the initial set of blocks up front. Returns a Vulkan error code
    // if block creation fails.
    VkResult CreateMinBlocks();

    VmaPool GetParentPool() const { return m_hParentPool; }
    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
    uint32_t GetAlgorithm() const { return m_Algorithm; }

    void GetPoolStats(VmaPoolStats* pStats);

    bool IsEmpty() const { return m_Blocks.empty(); }
    bool IsCorruptionDetectionEnabled() const;

    // Allocates allocationCount allocations in one call; results are written
    // to the pAllocations array.
    VkResult Allocate(
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        size_t allocationCount,
        VmaAllocation* pAllocations);

    void Free(
        VmaAllocation hAllocation);

    // Adds statistics of this BlockVector to pStats.
    void AddStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    void MakePoolAllocationsLost(
        uint32_t currentFrameIndex,
        size_t* pLostAllocationCount);
    VkResult CheckCorruption();

    // Saves results in pCtx->res.
    // The max*BytesToMove / max*AllocationsToMove parameters are passed by
    // reference so the remaining budget can be shared across block vectors.
    void Defragment(
        class VmaBlockVectorDefragmentationContext* pCtx,
        VmaDefragmentationStats* pStats,
        VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
        VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
        VkCommandBuffer commandBuffer);
    void DefragmentationEnd(
        class VmaBlockVectorDefragmentationContext* pCtx,
        VmaDefragmentationStats* pStats);

    ////////////////////////////////////////////////////////////////////////
    // To be used only while the m_Mutex is locked. Used during defragmentation.

    size_t GetBlockCount() const { return m_Blocks.size(); }
    VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
    size_t CalcAllocationCount() const;
    bool IsBufferImageGranularityConflictPossible() const;

private:
    friend class VmaDefragmentationAlgorithm_Generic;

    const VmaAllocator m_hAllocator;
    const VmaPool m_hParentPool;
    const uint32_t m_MemoryTypeIndex;
    const VkDeviceSize m_PreferredBlockSize;
    const size_t m_MinBlockCount;
    const size_t m_MaxBlockCount;
    const VkDeviceSize m_BufferImageGranularity;
    const uint32_t m_FrameInUseCount;
    const bool m_IsCustomPool;
    const bool m_ExplicitBlockSize;
    const uint32_t m_Algorithm;
    /* There can be at most one allocation that is completely empty - a
    hysteresis to avoid pessimistic case of alternating creation and destruction
    of a VkDeviceMemory. */
    bool m_HasEmptyBlock;
    VMA_RW_MUTEX m_Mutex;
    // Incrementally sorted by sumFreeSize, ascending.
    VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
    uint32_t m_NextBlockId;

    VkDeviceSize CalcMaxBlockSize() const;

    // Finds and removes given block from vector.
    void Remove(VmaDeviceMemoryBlock* pBlock);

    // Performs single step in sorting m_Blocks. They may not be fully sorted
    // after this call.
    void IncrementallySortBlocks();

    // Allocates a single allocation (one page of a multi-allocation request).
    VkResult AllocatePage(
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // To be used only without CAN_MAKE_OTHER_LOST flag.
    VkResult AllocateFromBlock(
        VmaDeviceMemoryBlock* pBlock,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        VmaAllocationCreateFlags allocFlags,
        void* pUserData,
        VmaSuballocationType suballocType,
        uint32_t strategy,
        VmaAllocation* pAllocation);

    VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);

    // Saves result to pCtx->res.
    void ApplyDefragmentationMovesCpu(
        class VmaBlockVectorDefragmentationContext* pDefragCtx,
        const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves);
    // Saves result to pCtx->res.
    void ApplyDefragmentationMovesGpu(
        class VmaBlockVectorDefragmentationContext* pDefragCtx,
        const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
        VkCommandBuffer commandBuffer);

    /*
    Used during defragmentation. pDefragmentationStats is optional. It's in/out
    - updated with new data.
    */
    void FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats);
};
    6063 
// Implementation of the VmaPool handle: a custom memory pool. Thin wrapper
// that owns a VmaBlockVector plus a numeric id.
struct VmaPool_T
{
    VMA_CLASS_NO_COPY(VmaPool_T)
public:
    VmaBlockVector m_BlockVector;

    VmaPool_T(
        VmaAllocator hAllocator,
        const VmaPoolCreateInfo& createInfo,
        VkDeviceSize preferredBlockSize);
    ~VmaPool_T();

    uint32_t GetId() const { return m_Id; }
    // The id may be assigned exactly once, while it is still 0.
    void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }

#if VMA_STATS_STRING_ENABLED
    //void PrintDetailedMap(class VmaStringBuilder& sb);
#endif

private:
    uint32_t m_Id;
};
    6086 
    6087 /*
    6088 Performs defragmentation:
    6089 
    6090 - Updates `pBlockVector->m_pMetadata`.
    6091 - Updates allocations by calling ChangeBlockAllocation() or ChangeOffset().
    6092 - Does not move actual data, only returns requested moves as `moves`.
    6093 */
// Abstract base class for defragmentation algorithms (see the comment block
// above for the contract: updates metadata and allocations, but does not move
// actual data - it only reports the requested moves through `moves`).
class VmaDefragmentationAlgorithm
{
    VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm)
public:
    VmaDefragmentationAlgorithm(
        VmaAllocator hAllocator,
        VmaBlockVector* pBlockVector,
        uint32_t currentFrameIndex) :
        m_hAllocator(hAllocator),
        m_pBlockVector(pBlockVector),
        m_CurrentFrameIndex(currentFrameIndex)
    {
    }
    virtual ~VmaDefragmentationAlgorithm()
    {
    }

    // Registers a single allocation as a candidate for moving. pChanged is an
    // optional output flag set if the allocation ends up being moved.
    virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) = 0;
    // Marks every allocation in the block vector as movable.
    virtual void AddAll() = 0;

    // Computes the moves, appending them to `moves`, while respecting the
    // given byte/count budget.
    virtual VkResult Defragment(
        VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove) = 0;

    virtual VkDeviceSize GetBytesMoved() const = 0;
    virtual uint32_t GetAllocationsMoved() const = 0;

protected:
    VmaAllocator const m_hAllocator;
    VmaBlockVector* const m_pBlockVector;
    const uint32_t m_CurrentFrameIndex;

    // Pairs an allocation with its optional "was changed" output flag.
    struct AllocationInfo
    {
        VmaAllocation m_hAllocation;
        VkBool32* m_pChanged;

        AllocationInfo() :
            m_hAllocation(VK_NULL_HANDLE),
            m_pChanged(VMA_NULL)
        {
        }
        AllocationInfo(VmaAllocation hAlloc, VkBool32* pChanged) :
            m_hAllocation(hAlloc),
            m_pChanged(pChanged)
        {
        }
    };
};
    6144 
// General-purpose defragmentation algorithm. Gathers per-block information,
// then repeatedly moves allocations between blocks (see DefragmentRound) until
// the byte/count budget is exhausted or no more sensible moves exist.
class VmaDefragmentationAlgorithm_Generic : public VmaDefragmentationAlgorithm
{
    VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Generic)
public:
    VmaDefragmentationAlgorithm_Generic(
        VmaAllocator hAllocator,
        VmaBlockVector* pBlockVector,
        uint32_t currentFrameIndex,
        bool overlappingMoveSupported);
    virtual ~VmaDefragmentationAlgorithm_Generic();

    virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
    virtual void AddAll() { m_AllAllocations = true; }

    virtual VkResult Defragment(
        VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);

    virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }

private:
    uint32_t m_AllocationCount;
    // True when AddAll() was called: every allocation is movable.
    bool m_AllAllocations;

    // Running totals reported through GetBytesMoved()/GetAllocationsMoved().
    VkDeviceSize m_BytesMoved;
    uint32_t m_AllocationsMoved;

    // Comparator: larger allocations first.
    struct AllocationInfoSizeGreater
    {
        bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
        {
            return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
        }
    };

    // Comparator: higher offsets first.
    struct AllocationInfoOffsetGreater
    {
        bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
        {
            return lhs.m_hAllocation->GetOffset() > rhs.m_hAllocation->GetOffset();
        }
    };

    // Per-block bookkeeping used while planning moves.
    struct BlockInfo
    {
        size_t m_OriginalBlockIndex;
        VmaDeviceMemoryBlock* m_pBlock;
        bool m_HasNonMovableAllocations;
        VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

        BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
            m_OriginalBlockIndex(SIZE_MAX),
            m_pBlock(VMA_NULL),
            m_HasNonMovableAllocations(true),
            m_Allocations(pAllocationCallbacks)
        {
        }

        // A block has non-movable allocations when it contains more
        // allocations than were registered for defragmentation.
        void CalcHasNonMovableAllocations()
        {
            const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
            const size_t defragmentAllocCount = m_Allocations.size();
            m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
        }

        void SortAllocationsBySizeDescending()
        {
            VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
        }

        void SortAllocationsByOffsetDescending()
        {
            VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoOffsetGreater());
        }
    };

    // Orders BlockInfo pointers by the address of the underlying block; the
    // heterogeneous overload supports binary search against a raw block ptr.
    struct BlockPointerLess
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlock;
        }
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
        }
    };

    // 1. Blocks with some non-movable allocations go first.
    // 2. Blocks with smaller sumFreeSize go first.
    struct BlockInfoCompareMoveDestination
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return true;
            }
            if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return false;
            }
            if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
            {
                return true;
            }
            return false;
        }
    };

    typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
    BlockInfoVector m_Blocks;

    // One pass of the algorithm; appends moves until the budget runs out.
    VkResult DefragmentRound(
        VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);

    size_t CalcBlocksWithNonMovableCount() const;

    // Heuristic filter: rejects moves that would not improve fragmentation
    // (e.g. moving forward within the same ordering).
    static bool MoveMakesSense(
        size_t dstBlockIndex, VkDeviceSize dstOffset,
        size_t srcBlockIndex, VkDeviceSize srcOffset);
};
    6271 
// Fast defragmentation algorithm. Unlike the generic variant it operates on
// whole-block metadata (Preprocess/PostprocessMetadata) and tracks a small,
// fixed-size database of free regions to place moved allocations into.
// Note: AddAllocation() only counts allocations; the fast path works on all
// allocations of the block vector.
class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm
{
    VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Fast)
public:
    VmaDefragmentationAlgorithm_Fast(
        VmaAllocator hAllocator,
        VmaBlockVector* pBlockVector,
        uint32_t currentFrameIndex,
        bool overlappingMoveSupported);
    virtual ~VmaDefragmentationAlgorithm_Fast();

    virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; }
    virtual void AddAll() { m_AllAllocations = true; }

    virtual VkResult Defragment(
        VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);

    virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }

private:
    struct BlockInfo
    {
        size_t origBlockIndex;
    };

    // Fixed-capacity (MAX_COUNT) registry of free regions found while
    // scanning blocks. Register() keeps the most useful (largest) regions;
    // Fetch() hands out a region that can hold a given size/alignment.
    class FreeSpaceDatabase
    {
    public:
        FreeSpaceDatabase()
        {
            // Mark all slots invalid (blockInfoIndex == SIZE_MAX).
            FreeSpace s = {};
            s.blockInfoIndex = SIZE_MAX;
            for(size_t i = 0; i < MAX_COUNT; ++i)
            {
                m_FreeSpaces[i] = s;
            }
        }

        void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size)
        {
            // Regions below the threshold are not worth tracking.
            if(size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
            {
                return;
            }

            // Find first invalid or the smallest structure.
            size_t bestIndex = SIZE_MAX;
            for(size_t i = 0; i < MAX_COUNT; ++i)
            {
                // Empty structure.
                if(m_FreeSpaces[i].blockInfoIndex == SIZE_MAX)
                {
                    bestIndex = i;
                    break;
                }
                // Candidate for replacement: an entry smaller than the new
                // region; among those, prefer the smallest.
                if(m_FreeSpaces[i].size < size &&
                    (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size))
                {
                    bestIndex = i;
                }
            }

            if(bestIndex != SIZE_MAX)
            {
                m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex;
                m_FreeSpaces[bestIndex].offset = offset;
                m_FreeSpaces[bestIndex].size = size;
            }
        }

        // Finds a registered region that fits `size` at `alignment`, returns
        // its block index and aligned destination offset, and shrinks or
        // invalidates the consumed entry. Returns false when nothing fits.
        bool Fetch(VkDeviceSize alignment, VkDeviceSize size,
            size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset)
        {
            size_t bestIndex = SIZE_MAX;
            VkDeviceSize bestFreeSpaceAfter = 0;
            for(size_t i = 0; i < MAX_COUNT; ++i)
            {
                // Structure is valid.
                if(m_FreeSpaces[i].blockInfoIndex != SIZE_MAX)
                {
                    const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment);
                    // Allocation fits into this structure.
                    if(dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size)
                    {
                        const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) -
                            (dstOffset + size);
                        // Prefer the entry leaving the most space after the
                        // allocation.
                        if(bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter)
                        {
                            bestIndex = i;
                            bestFreeSpaceAfter = freeSpaceAfter;
                        }
                    }
                }
            }

            if(bestIndex != SIZE_MAX)
            {
                outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex;
                outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment);

                if(bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
                {
                    // Leave this structure for remaining empty space.
                    const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size;
                    m_FreeSpaces[bestIndex].offset += alignmentPlusSize;
                    m_FreeSpaces[bestIndex].size -= alignmentPlusSize;
                }
                else
                {
                    // This structure becomes invalid.
                    m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX;
                }

                return true;
            }

            return false;
        }

    private:
        static const size_t MAX_COUNT = 4;

        struct FreeSpace
        {
            size_t blockInfoIndex; // SIZE_MAX means this structure is invalid.
            VkDeviceSize offset;
            VkDeviceSize size;
        } m_FreeSpaces[MAX_COUNT];
    };

    const bool m_OverlappingMoveSupported;

    uint32_t m_AllocationCount;
    bool m_AllAllocations;

    // Running totals reported through GetBytesMoved()/GetAllocationsMoved().
    VkDeviceSize m_BytesMoved;
    uint32_t m_AllocationsMoved;

    VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> > m_BlockInfos;

    void PreprocessMetadata();
    void PostprocessMetadata();
    void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc);
};
    6419 
// Per-block state kept during defragmentation: usage flags plus an optional
// temporary VkBuffer used for copy operations.
struct VmaBlockDefragmentationContext
{
    enum BLOCK_FLAG
    {
        BLOCK_FLAG_USED = 0x00000001,
    };
    uint32_t flags;   // Combination of BLOCK_FLAG bits.
    VkBuffer hBuffer; // Temporary buffer bound to the block's memory, if any.
};
    6429 
// Defragmentation state for a single block vector (one default pool or one
// custom pool). Collects the allocations to process between construction and
// Begin(), owns the chosen VmaDefragmentationAlgorithm, and carries per-block
// contexts plus the result code.
class VmaBlockVectorDefragmentationContext
{
    VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext)
public:
    VkResult res;     // Result of the defragmentation of this block vector.
    bool mutexLocked; // True while the block vector's mutex is held by this context.
    VmaVector< VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext> > blockContexts;

    VmaBlockVectorDefragmentationContext(
        VmaAllocator hAllocator,
        VmaPool hCustomPool, // Optional.
        VmaBlockVector* pBlockVector,
        uint32_t currFrameIndex);
    ~VmaBlockVectorDefragmentationContext();

    VmaPool GetCustomPool() const { return m_hCustomPool; }
    VmaBlockVector* GetBlockVector() const { return m_pBlockVector; }
    VmaDefragmentationAlgorithm* GetAlgorithm() const { return m_pAlgorithm; }

    void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
    void AddAll() { m_AllAllocations = true; }

    // Creates m_pAlgorithm and feeds it the collected allocations.
    void Begin(bool overlappingMoveSupported);

private:
    const VmaAllocator m_hAllocator;
    // Null if not from custom pool.
    const VmaPool m_hCustomPool;
    // Redundant, for convenience not to fetch from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors.
    VmaBlockVector* const m_pBlockVector;
    const uint32_t m_CurrFrameIndex;
    // Owner of this object.
    VmaDefragmentationAlgorithm* m_pAlgorithm;

    struct AllocInfo
    {
        VmaAllocation hAlloc;
        VkBool32* pChanged;
    };
    // Used between constructor and Begin.
    VmaVector< AllocInfo, VmaStlAllocator<AllocInfo> > m_Allocations;
    bool m_AllAllocations;
};
    6473 
// Implementation of the VmaDefragmentationContext handle. Top-level
// defragmentation state: owns one VmaBlockVectorDefragmentationContext per
// affected default pool (indexed by memory type) and per custom pool.
struct VmaDefragmentationContext_T
{
private:
    VMA_CLASS_NO_COPY(VmaDefragmentationContext_T)
public:
    VmaDefragmentationContext_T(
        VmaAllocator hAllocator,
        uint32_t currFrameIndex,
        uint32_t flags,
        VmaDefragmentationStats* pStats);
    ~VmaDefragmentationContext_T();

    void AddPools(uint32_t poolCount, VmaPool* pPools);
    void AddAllocations(
        uint32_t allocationCount,
        VmaAllocation* pAllocations,
        VkBool32* pAllocationsChanged); // Optional per-allocation output flags.

    /*
    Returns:
    - `VK_SUCCESS` if succeeded and object can be destroyed immediately.
    - `VK_NOT_READY` if succeeded but the object must remain alive until vmaDefragmentationEnd().
    - Negative value if error occurred and object can be destroyed immediately.
    */
    VkResult Defragment(
        VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
        VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
        VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats);

private:
    const VmaAllocator m_hAllocator;
    const uint32_t m_CurrFrameIndex;
    const uint32_t m_Flags;
    VmaDefragmentationStats* const m_pStats;
    // Owner of these objects.
    VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES];
    // Owner of these objects.
    VmaVector< VmaBlockVectorDefragmentationContext*, VmaStlAllocator<VmaBlockVectorDefragmentationContext*> > m_CustomPoolContexts;
};
    6513 
    6514 #if VMA_RECORDING_ENABLED
    6515 
    6516 class VmaRecorder
    6517 {
    6518 public:
    6519  VmaRecorder();
    6520  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
    6521  void WriteConfiguration(
    6522  const VkPhysicalDeviceProperties& devProps,
    6523  const VkPhysicalDeviceMemoryProperties& memProps,
    6524  bool dedicatedAllocationExtensionEnabled);
    6525  ~VmaRecorder();
    6526 
    6527  void RecordCreateAllocator(uint32_t frameIndex);
    6528  void RecordDestroyAllocator(uint32_t frameIndex);
    6529  void RecordCreatePool(uint32_t frameIndex,
    6530  const VmaPoolCreateInfo& createInfo,
    6531  VmaPool pool);
    6532  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
    6533  void RecordAllocateMemory(uint32_t frameIndex,
    6534  const VkMemoryRequirements& vkMemReq,
    6535  const VmaAllocationCreateInfo& createInfo,
    6536  VmaAllocation allocation);
    6537  void RecordAllocateMemoryPages(uint32_t frameIndex,
    6538  const VkMemoryRequirements& vkMemReq,
    6539  const VmaAllocationCreateInfo& createInfo,
    6540  uint64_t allocationCount,
    6541  const VmaAllocation* pAllocations);
    6542  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
    6543  const VkMemoryRequirements& vkMemReq,
    6544  bool requiresDedicatedAllocation,
    6545  bool prefersDedicatedAllocation,
    6546  const VmaAllocationCreateInfo& createInfo,
    6547  VmaAllocation allocation);
    6548  void RecordAllocateMemoryForImage(uint32_t frameIndex,
    6549  const VkMemoryRequirements& vkMemReq,
    6550  bool requiresDedicatedAllocation,
    6551  bool prefersDedicatedAllocation,
    6552  const VmaAllocationCreateInfo& createInfo,
    6553  VmaAllocation allocation);
    6554  void RecordFreeMemory(uint32_t frameIndex,
    6555  VmaAllocation allocation);
    6556  void RecordFreeMemoryPages(uint32_t frameIndex,
    6557  uint64_t allocationCount,
    6558  const VmaAllocation* pAllocations);
    6559  void RecordResizeAllocation(
    6560  uint32_t frameIndex,
    6561  VmaAllocation allocation,
    6562  VkDeviceSize newSize);
    6563  void RecordSetAllocationUserData(uint32_t frameIndex,
    6564  VmaAllocation allocation,
    6565  const void* pUserData);
    6566  void RecordCreateLostAllocation(uint32_t frameIndex,
    6567  VmaAllocation allocation);
    6568  void RecordMapMemory(uint32_t frameIndex,
    6569  VmaAllocation allocation);
    6570  void RecordUnmapMemory(uint32_t frameIndex,
    6571  VmaAllocation allocation);
    6572  void RecordFlushAllocation(uint32_t frameIndex,
    6573  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    6574  void RecordInvalidateAllocation(uint32_t frameIndex,
    6575  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    6576  void RecordCreateBuffer(uint32_t frameIndex,
    6577  const VkBufferCreateInfo& bufCreateInfo,
    6578  const VmaAllocationCreateInfo& allocCreateInfo,
    6579  VmaAllocation allocation);
    6580  void RecordCreateImage(uint32_t frameIndex,
    6581  const VkImageCreateInfo& imageCreateInfo,
    6582  const VmaAllocationCreateInfo& allocCreateInfo,
    6583  VmaAllocation allocation);
    6584  void RecordDestroyBuffer(uint32_t frameIndex,
    6585  VmaAllocation allocation);
    6586  void RecordDestroyImage(uint32_t frameIndex,
    6587  VmaAllocation allocation);
    6588  void RecordTouchAllocation(uint32_t frameIndex,
    6589  VmaAllocation allocation);
    6590  void RecordGetAllocationInfo(uint32_t frameIndex,
    6591  VmaAllocation allocation);
    6592  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
    6593  VmaPool pool);
    6594  void RecordDefragmentationBegin(uint32_t frameIndex,
    6595  const VmaDefragmentationInfo2& info,
    6597  void RecordDefragmentationEnd(uint32_t frameIndex,
    6599 
    6600 private:
    6601  struct CallParams
    6602  {
    6603  uint32_t threadId;
    6604  double time;
    6605  };
    6606 
    6607  class UserDataString
    6608  {
    6609  public:
    6610  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
    6611  const char* GetString() const { return m_Str; }
    6612 
    6613  private:
    6614  char m_PtrStr[17];
    6615  const char* m_Str;
    6616  };
    6617 
    6618  bool m_UseMutex;
    6619  VmaRecordFlags m_Flags;
    6620  FILE* m_File;
    6621  VMA_MUTEX m_FileMutex;
    6622  int64_t m_Freq;
    6623  int64_t m_StartCounter;
    6624 
    6625  void GetBasicParams(CallParams& outParams);
    6626 
    6627  // T must be a pointer type, e.g. VmaAllocation, VmaPool.
    6628  template<typename T>
    6629  void PrintPointerList(uint64_t count, const T* pItems)
    6630  {
    6631  if(count)
    6632  {
    6633  fprintf(m_File, "%p", pItems[0]);
    6634  for(uint64_t i = 1; i < count; ++i)
    6635  {
    6636  fprintf(m_File, " %p", pItems[i]);
    6637  }
    6638  }
    6639  }
    6640 
    6641  void PrintPointerList(uint64_t count, const VmaAllocation* pItems);
    6642  void Flush();
    6643 };
    6644 
    6645 #endif // #if VMA_RECORDING_ENABLED
    6646 
    6647 /*
    6648 Thread-safe wrapper over VmaPoolAllocator free list, for allocation of VmaAllocation_T objects.
    6649 */
/*
Thread-safe wrapper over VmaPoolAllocator free list, for allocation of VmaAllocation_T objects.
All access to the underlying pool allocator is serialized with m_Mutex.
*/
class VmaAllocationObjectAllocator
{
    VMA_CLASS_NO_COPY(VmaAllocationObjectAllocator)
public:
    VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks);

    // Returns a new VmaAllocation_T object from the pool.
    VmaAllocation Allocate();
    // Returns the object to the pool for reuse.
    void Free(VmaAllocation hAlloc);

private:
    VMA_MUTEX m_Mutex;
    VmaPoolAllocator<VmaAllocation_T> m_Allocator;
};
    6663 
    6664 // Main allocator object.
    6665 struct VmaAllocator_T
    6666 {
    6667  VMA_CLASS_NO_COPY(VmaAllocator_T)
    6668 public:
    6669  bool m_UseMutex;
    6670  bool m_UseKhrDedicatedAllocation;
    6671  VkDevice m_hDevice;
    6672  bool m_AllocationCallbacksSpecified;
    6673  VkAllocationCallbacks m_AllocationCallbacks;
    6674  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
    6675  VmaAllocationObjectAllocator m_AllocationObjectAllocator;
    6676 
    6677  // Number of bytes free out of limit, or VK_WHOLE_SIZE if no limit for that heap.
    6678  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
    6679  VMA_MUTEX m_HeapSizeLimitMutex;
    6680 
    6681  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
    6682  VkPhysicalDeviceMemoryProperties m_MemProps;
    6683 
    6684  // Default pools.
    6685  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
    6686 
    6687  // Each vector is sorted by memory (handle value).
    6688  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
    6689  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
    6690  VMA_RW_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
    6691 
    6692  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
    6693  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
    6694  ~VmaAllocator_T();
    6695 
    6696  const VkAllocationCallbacks* GetAllocationCallbacks() const
    6697  {
    6698  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
    6699  }
    6700  const VmaVulkanFunctions& GetVulkanFunctions() const
    6701  {
    6702  return m_VulkanFunctions;
    6703  }
    6704 
    6705  VkDeviceSize GetBufferImageGranularity() const
    6706  {
    6707  return VMA_MAX(
    6708  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
    6709  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
    6710  }
    6711 
    6712  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
    6713  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
    6714 
    6715  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
    6716  {
    6717  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
    6718  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
    6719  }
    6720  // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
    6721  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
    6722  {
    6723  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
    6724  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    6725  }
    6726  // Minimum alignment for all allocations in specific memory type.
    6727  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
    6728  {
    6729  return IsMemoryTypeNonCoherent(memTypeIndex) ?
    6730  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
    6731  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
    6732  }
    6733 
    6734  bool IsIntegratedGpu() const
    6735  {
    6736  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
    6737  }
    6738 
    6739 #if VMA_RECORDING_ENABLED
    6740  VmaRecorder* GetRecorder() const { return m_pRecorder; }
    6741 #endif
    6742 
    6743  void GetBufferMemoryRequirements(
    6744  VkBuffer hBuffer,
    6745  VkMemoryRequirements& memReq,
    6746  bool& requiresDedicatedAllocation,
    6747  bool& prefersDedicatedAllocation) const;
    6748  void GetImageMemoryRequirements(
    6749  VkImage hImage,
    6750  VkMemoryRequirements& memReq,
    6751  bool& requiresDedicatedAllocation,
    6752  bool& prefersDedicatedAllocation) const;
    6753 
    6754  // Main allocation function.
    6755  VkResult AllocateMemory(
    6756  const VkMemoryRequirements& vkMemReq,
    6757  bool requiresDedicatedAllocation,
    6758  bool prefersDedicatedAllocation,
    6759  VkBuffer dedicatedBuffer,
    6760  VkImage dedicatedImage,
    6761  const VmaAllocationCreateInfo& createInfo,
    6762  VmaSuballocationType suballocType,
    6763  size_t allocationCount,
    6764  VmaAllocation* pAllocations);
    6765 
    6766  // Main deallocation function.
    6767  void FreeMemory(
    6768  size_t allocationCount,
    6769  const VmaAllocation* pAllocations);
    6770 
    6771  VkResult ResizeAllocation(
    6772  const VmaAllocation alloc,
    6773  VkDeviceSize newSize);
    6774 
    6775  void CalculateStats(VmaStats* pStats);
    6776 
    6777 #if VMA_STATS_STRING_ENABLED
    6778  void PrintDetailedMap(class VmaJsonWriter& json);
    6779 #endif
    6780 
    6781  VkResult DefragmentationBegin(
    6782  const VmaDefragmentationInfo2& info,
    6783  VmaDefragmentationStats* pStats,
    6784  VmaDefragmentationContext* pContext);
    6785  VkResult DefragmentationEnd(
    6786  VmaDefragmentationContext context);
    6787 
    6788  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
    6789  bool TouchAllocation(VmaAllocation hAllocation);
    6790 
    6791  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
    6792  void DestroyPool(VmaPool pool);
    6793  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
    6794 
    6795  void SetCurrentFrameIndex(uint32_t frameIndex);
    6796  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
    6797 
    6798  void MakePoolAllocationsLost(
    6799  VmaPool hPool,
    6800  size_t* pLostAllocationCount);
    6801  VkResult CheckPoolCorruption(VmaPool hPool);
    6802  VkResult CheckCorruption(uint32_t memoryTypeBits);
    6803 
    6804  void CreateLostAllocation(VmaAllocation* pAllocation);
    6805 
    6806  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
    6807  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
    6808 
    6809  VkResult Map(VmaAllocation hAllocation, void** ppData);
    6810  void Unmap(VmaAllocation hAllocation);
    6811 
    6812  VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
    6813  VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);
    6814 
    6815  void FlushOrInvalidateAllocation(
    6816  VmaAllocation hAllocation,
    6817  VkDeviceSize offset, VkDeviceSize size,
    6818  VMA_CACHE_OPERATION op);
    6819 
    6820  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
    6821 
    6822  /*
    6823  Returns bit mask of memory types that can support defragmentation on GPU as
    6824  they support creation of required buffer for copy operations.
    6825  */
    6826  uint32_t GetGpuDefragmentationMemoryTypeBits();
    6827 
    6828 private:
    6829  VkDeviceSize m_PreferredLargeHeapBlockSize;
    6830 
    6831  VkPhysicalDevice m_PhysicalDevice;
    6832  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
    6833  VMA_ATOMIC_UINT32 m_GpuDefragmentationMemoryTypeBits; // UINT32_MAX means uninitialized.
    6834 
    6835  VMA_RW_MUTEX m_PoolsMutex;
    6836  // Protected by m_PoolsMutex. Sorted by pointer value.
    6837  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
    6838  uint32_t m_NextPoolId;
    6839 
    6840  VmaVulkanFunctions m_VulkanFunctions;
    6841 
    6842 #if VMA_RECORDING_ENABLED
    6843  VmaRecorder* m_pRecorder;
    6844 #endif
    6845 
    6846  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
    6847 
    6848  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
    6849 
    6850  VkResult AllocateMemoryOfType(
    6851  VkDeviceSize size,
    6852  VkDeviceSize alignment,
    6853  bool dedicatedAllocation,
    6854  VkBuffer dedicatedBuffer,
    6855  VkImage dedicatedImage,
    6856  const VmaAllocationCreateInfo& createInfo,
    6857  uint32_t memTypeIndex,
    6858  VmaSuballocationType suballocType,
    6859  size_t allocationCount,
    6860  VmaAllocation* pAllocations);
    6861 
    6862  // Helper function only to be used inside AllocateDedicatedMemory.
    6863  VkResult AllocateDedicatedMemoryPage(
    6864  VkDeviceSize size,
    6865  VmaSuballocationType suballocType,
    6866  uint32_t memTypeIndex,
    6867  const VkMemoryAllocateInfo& allocInfo,
    6868  bool map,
    6869  bool isUserDataString,
    6870  void* pUserData,
    6871  VmaAllocation* pAllocation);
    6872 
    6873  // Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
    6874  VkResult AllocateDedicatedMemory(
    6875  VkDeviceSize size,
    6876  VmaSuballocationType suballocType,
    6877  uint32_t memTypeIndex,
    6878  bool map,
    6879  bool isUserDataString,
    6880  void* pUserData,
    6881  VkBuffer dedicatedBuffer,
    6882  VkImage dedicatedImage,
    6883  size_t allocationCount,
    6884  VmaAllocation* pAllocations);
    6885 
    6886  void FreeDedicatedMemory(VmaAllocation allocation);
    6887 
    6888  /*
    6889  Calculates and returns bit mask of memory types that can support defragmentation
    6890  on GPU as they support creation of required buffer for copy operations.
    6891  */
    6892  uint32_t CalculateGpuDefragmentationMemoryTypeBits() const;
    6893 };
    6894 
    6896 // Memory allocation #2 after VmaAllocator_T definition
    6897 
// Allocates raw memory using the allocator's VkAllocationCallbacks.
// Thin forwarder to the callback-based VmaMalloc overload.
static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
{
    return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
}
    6902 
// Frees memory previously obtained from VmaMalloc(hAllocator, ...).
// Thin forwarder to the callback-based VmaFree overload.
static void VmaFree(VmaAllocator hAllocator, void* ptr)
{
    VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
}
    6907 
// Allocates uninitialized storage for a single T with proper alignment.
// Note: does NOT run T's constructor (vma_new-style helpers do that).
template<typename T>
static T* VmaAllocate(VmaAllocator hAllocator)
{
    return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
}
    6913 
// Allocates uninitialized storage for `count` objects of type T.
// Note: constructors are not run here.
template<typename T>
static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
{
    return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
}
    6919 
    6920 template<typename T>
    6921 static void vma_delete(VmaAllocator hAllocator, T* ptr)
    6922 {
    6923  if(ptr != VMA_NULL)
    6924  {
    6925  ptr->~T();
    6926  VmaFree(hAllocator, ptr);
    6927  }
    6928 }
    6929 
    6930 template<typename T>
    6931 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
    6932 {
    6933  if(ptr != VMA_NULL)
    6934  {
    6935  for(size_t i = count; i--; )
    6936  ptr[i].~T();
    6937  VmaFree(hAllocator, ptr);
    6938  }
    6939 }
    6940 
    6942 // VmaStringBuilder
    6943 
    6944 #if VMA_STATS_STRING_ENABLED
    6945 
// Incrementally builds a character buffer; used to produce statistics strings.
// The buffer is NOT NUL-terminated - use GetLength() together with GetData().
// Available only when VMA_STATS_STRING_ENABLED.
class VmaStringBuilder
{
public:
    VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    // Number of characters written so far.
    size_t GetLength() const { return m_Data.size(); }
    const char* GetData() const { return m_Data.data(); }

    void Add(char ch) { m_Data.push_back(ch); }
    void Add(const char* pStr);
    void AddNewLine() { Add('\n'); }
    void AddNumber(uint32_t num);
    void AddNumber(uint64_t num);
    // Appends the pointer value formatted as text via VmaPtrToStr.
    void AddPointer(const void* ptr);

private:
    VmaVector< char, VmaStlAllocator<char> > m_Data;
};
    6963 
    6964 void VmaStringBuilder::Add(const char* pStr)
    6965 {
    6966  const size_t strLen = strlen(pStr);
    6967  if(strLen > 0)
    6968  {
    6969  const size_t oldCount = m_Data.size();
    6970  m_Data.resize(oldCount + strLen);
    6971  memcpy(m_Data.data() + oldCount, pStr, strLen);
    6972  }
    6973 }
    6974 
    6975 void VmaStringBuilder::AddNumber(uint32_t num)
    6976 {
    6977  char buf[11];
    6978  VmaUint32ToStr(buf, sizeof(buf), num);
    6979  Add(buf);
    6980 }
    6981 
    6982 void VmaStringBuilder::AddNumber(uint64_t num)
    6983 {
    6984  char buf[21];
    6985  VmaUint64ToStr(buf, sizeof(buf), num);
    6986  Add(buf);
    6987 }
    6988 
    6989 void VmaStringBuilder::AddPointer(const void* ptr)
    6990 {
    6991  char buf[21];
    6992  VmaPtrToStr(buf, sizeof(buf), ptr);
    6993  Add(buf);
    6994 }
    6995 
    6996 #endif // #if VMA_STATS_STRING_ENABLED
    6997 
    6999 // VmaJsonWriter
    7000 
    7001 #if VMA_STATS_STRING_ENABLED
    7002 
// Writes JSON text into a VmaStringBuilder. Keeps a stack of currently open
// objects/arrays so it can insert commas, key/value separators and
// indentation automatically. Available only when VMA_STATS_STRING_ENABLED.
class VmaJsonWriter
{
    VMA_CLASS_NO_COPY(VmaJsonWriter)
public:
    // sb: destination builder; must outlive this writer.
    VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    ~VmaJsonWriter();

    // singleLine == true suppresses newlines/indentation inside the collection.
    void BeginObject(bool singleLine = false);
    void EndObject();

    void BeginArray(bool singleLine = false);
    void EndArray();

    // Writes a complete quoted, escaped string value (or object key).
    void WriteString(const char* pStr);
    // BeginString / ContinueString* / EndString build one string value from
    // multiple pieces.
    void BeginString(const char* pStr = VMA_NULL);
    void ContinueString(const char* pStr);
    void ContinueString(uint32_t n);
    void ContinueString(uint64_t n);
    void ContinueString_Pointer(const void* ptr);
    void EndString(const char* pStr = VMA_NULL);

    void WriteNumber(uint32_t n);
    void WriteNumber(uint64_t n);
    void WriteBool(bool b);
    void WriteNull();

private:
    static const char* const INDENT;

    enum COLLECTION_TYPE
    {
        COLLECTION_TYPE_OBJECT,
        COLLECTION_TYPE_ARRAY,
    };
    struct StackItem
    {
        COLLECTION_TYPE type;
        // Inside an object, an even valueCount means "a key comes next".
        uint32_t valueCount;
        bool singleLineMode;
    };

    VmaStringBuilder& m_SB;
    // Currently open collections, innermost last.
    VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    bool m_InsideString;

    // Emits separator (", " or ": ") and indentation before a new value.
    void BeginValue(bool isString);
    void WriteIndent(bool oneLess = false);
};
    7051 
// Single indentation unit emitted per nesting level by WriteIndent().
const char* const VmaJsonWriter::INDENT = " ";

// sb: output builder that all JSON text is appended to; must outlive the writer.
VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
    m_SB(sb),
    m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    m_InsideString(false)
{
}
    7060 
// A correctly used writer is destroyed with every string and collection closed.
VmaJsonWriter::~VmaJsonWriter()
{
    VMA_ASSERT(!m_InsideString);
    VMA_ASSERT(m_Stack.empty());
}
    7066 
    7067 void VmaJsonWriter::BeginObject(bool singleLine)
    7068 {
    7069  VMA_ASSERT(!m_InsideString);
    7070 
    7071  BeginValue(false);
    7072  m_SB.Add('{');
    7073 
    7074  StackItem item;
    7075  item.type = COLLECTION_TYPE_OBJECT;
    7076  item.valueCount = 0;
    7077  item.singleLineMode = singleLine;
    7078  m_Stack.push_back(item);
    7079 }
    7080 
    7081 void VmaJsonWriter::EndObject()
    7082 {
    7083  VMA_ASSERT(!m_InsideString);
    7084 
    7085  WriteIndent(true);
    7086  m_SB.Add('}');
    7087 
    7088  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
    7089  m_Stack.pop_back();
    7090 }
    7091 
    7092 void VmaJsonWriter::BeginArray(bool singleLine)
    7093 {
    7094  VMA_ASSERT(!m_InsideString);
    7095 
    7096  BeginValue(false);
    7097  m_SB.Add('[');
    7098 
    7099  StackItem item;
    7100  item.type = COLLECTION_TYPE_ARRAY;
    7101  item.valueCount = 0;
    7102  item.singleLineMode = singleLine;
    7103  m_Stack.push_back(item);
    7104 }
    7105 
    7106 void VmaJsonWriter::EndArray()
    7107 {
    7108  VMA_ASSERT(!m_InsideString);
    7109 
    7110  WriteIndent(true);
    7111  m_SB.Add(']');
    7112 
    7113  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
    7114  m_Stack.pop_back();
    7115 }
    7116 
    7117 void VmaJsonWriter::WriteString(const char* pStr)
    7118 {
    7119  BeginString(pStr);
    7120  EndString();
    7121 }
    7122 
    7123 void VmaJsonWriter::BeginString(const char* pStr)
    7124 {
    7125  VMA_ASSERT(!m_InsideString);
    7126 
    7127  BeginValue(true);
    7128  m_SB.Add('"');
    7129  m_InsideString = true;
    7130  if(pStr != VMA_NULL && pStr[0] != '\0')
    7131  {
    7132  ContinueString(pStr);
    7133  }
    7134 }
    7135 
    7136 void VmaJsonWriter::ContinueString(const char* pStr)
    7137 {
    7138  VMA_ASSERT(m_InsideString);
    7139 
    7140  const size_t strLen = strlen(pStr);
    7141  for(size_t i = 0; i < strLen; ++i)
    7142  {
    7143  char ch = pStr[i];
    7144  if(ch == '\\')
    7145  {
    7146  m_SB.Add("\\\\");
    7147  }
    7148  else if(ch == '"')
    7149  {
    7150  m_SB.Add("\\\"");
    7151  }
    7152  else if(ch >= 32)
    7153  {
    7154  m_SB.Add(ch);
    7155  }
    7156  else switch(ch)
    7157  {
    7158  case '\b':
    7159  m_SB.Add("\\b");
    7160  break;
    7161  case '\f':
    7162  m_SB.Add("\\f");
    7163  break;
    7164  case '\n':
    7165  m_SB.Add("\\n");
    7166  break;
    7167  case '\r':
    7168  m_SB.Add("\\r");
    7169  break;
    7170  case '\t':
    7171  m_SB.Add("\\t");
    7172  break;
    7173  default:
    7174  VMA_ASSERT(0 && "Character not currently supported.");
    7175  break;
    7176  }
    7177  }
    7178 }
    7179 
// Appends a decimal number to the string currently being built (no quoting).
void VmaJsonWriter::ContinueString(uint32_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}
    7185 
// Appends a decimal number to the string currently being built (no quoting).
void VmaJsonWriter::ContinueString(uint64_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}
    7191 
// Appends a pointer value (formatted via VmaStringBuilder::AddPointer) to the
// string currently being built.
void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddPointer(ptr);
}
    7197 
    7198 void VmaJsonWriter::EndString(const char* pStr)
    7199 {
    7200  VMA_ASSERT(m_InsideString);
    7201  if(pStr != VMA_NULL && pStr[0] != '\0')
    7202  {
    7203  ContinueString(pStr);
    7204  }
    7205  m_SB.Add('"');
    7206  m_InsideString = false;
    7207 }
    7208 
// Writes a standalone numeric value (with separator/indent handling).
void VmaJsonWriter::WriteNumber(uint32_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}
    7215 
// Writes a standalone numeric value (with separator/indent handling).
void VmaJsonWriter::WriteNumber(uint64_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}
    7222 
    7223 void VmaJsonWriter::WriteBool(bool b)
    7224 {
    7225  VMA_ASSERT(!m_InsideString);
    7226  BeginValue(false);
    7227  m_SB.Add(b ? "true" : "false");
    7228 }
    7229 
// Writes the JSON null literal as a standalone value.
void VmaJsonWriter::WriteNull()
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.Add("null");
}
    7236 
    7237 void VmaJsonWriter::BeginValue(bool isString)
    7238 {
    7239  if(!m_Stack.empty())
    7240  {
    7241  StackItem& currItem = m_Stack.back();
    7242  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    7243  currItem.valueCount % 2 == 0)
    7244  {
    7245  VMA_ASSERT(isString);
    7246  }
    7247 
    7248  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    7249  currItem.valueCount % 2 != 0)
    7250  {
    7251  m_SB.Add(": ");
    7252  }
    7253  else if(currItem.valueCount > 0)
    7254  {
    7255  m_SB.Add(", ");
    7256  WriteIndent();
    7257  }
    7258  else
    7259  {
    7260  WriteIndent();
    7261  }
    7262  ++currItem.valueCount;
    7263  }
    7264 }
    7265 
    7266 void VmaJsonWriter::WriteIndent(bool oneLess)
    7267 {
    7268  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
    7269  {
    7270  m_SB.AddNewLine();
    7271 
    7272  size_t count = m_Stack.size();
    7273  if(count > 0 && oneLess)
    7274  {
    7275  --count;
    7276  }
    7277  for(size_t i = 0; i < count; ++i)
    7278  {
    7279  m_SB.Add(INDENT);
    7280  }
    7281  }
    7282 }
    7283 
    7284 #endif // #if VMA_STATS_STRING_ENABLED
    7285 
    7287 
    7288 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
    7289 {
    7290  if(IsUserDataString())
    7291  {
    7292  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
    7293 
    7294  FreeUserDataString(hAllocator);
    7295 
    7296  if(pUserData != VMA_NULL)
    7297  {
    7298  const char* const newStrSrc = (char*)pUserData;
    7299  const size_t newStrLen = strlen(newStrSrc);
    7300  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
    7301  memcpy(newStrDst, newStrSrc, newStrLen + 1);
    7302  m_pUserData = newStrDst;
    7303  }
    7304  }
    7305  else
    7306  {
    7307  m_pUserData = pUserData;
    7308  }
    7309 }
    7310 
// Re-points a block-type allocation at a different memory block/offset,
// e.g. during defragmentation.
void VmaAllocation_T::ChangeBlockAllocation(
    VmaAllocator hAllocator,
    VmaDeviceMemoryBlock* block,
    VkDeviceSize offset)
{
    VMA_ASSERT(block != VMA_NULL);
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);

    // Move mapping reference counter from old block to new block.
    if(block != m_BlockAllocation.m_Block)
    {
        // Low 7 bits of m_MapCount are explicit Map() references; persistent
        // mapping contributes one extra reference that must carry over too.
        uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
        if(IsPersistentMap())
            ++mapRefCount;
        m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
        block->Map(hAllocator, mapRefCount, VMA_NULL);
    }

    m_BlockAllocation.m_Block = block;
    m_BlockAllocation.m_Offset = offset;
}
    7332 
// Updates the cached size of this allocation; used by ResizeAllocation.
void VmaAllocation_T::ChangeSize(VkDeviceSize newSize)
{
    VMA_ASSERT(newSize > 0);
    m_Size = newSize;
}
    7338 
// Updates the offset within the owning block; only valid for block-type
// allocations (dedicated allocations always sit at offset 0).
void VmaAllocation_T::ChangeOffset(VkDeviceSize newOffset)
{
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    m_BlockAllocation.m_Offset = newOffset;
}
    7344 
    7345 VkDeviceSize VmaAllocation_T::GetOffset() const
    7346 {
    7347  switch(m_Type)
    7348  {
    7349  case ALLOCATION_TYPE_BLOCK:
    7350  return m_BlockAllocation.m_Offset;
    7351  case ALLOCATION_TYPE_DEDICATED:
    7352  return 0;
    7353  default:
    7354  VMA_ASSERT(0);
    7355  return 0;
    7356  }
    7357 }
    7358 
    7359 VkDeviceMemory VmaAllocation_T::GetMemory() const
    7360 {
    7361  switch(m_Type)
    7362  {
    7363  case ALLOCATION_TYPE_BLOCK:
    7364  return m_BlockAllocation.m_Block->GetDeviceMemory();
    7365  case ALLOCATION_TYPE_DEDICATED:
    7366  return m_DedicatedAllocation.m_hMemory;
    7367  default:
    7368  VMA_ASSERT(0);
    7369  return VK_NULL_HANDLE;
    7370  }
    7371 }
    7372 
    7373 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
    7374 {
    7375  switch(m_Type)
    7376  {
    7377  case ALLOCATION_TYPE_BLOCK:
    7378  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
    7379  case ALLOCATION_TYPE_DEDICATED:
    7380  return m_DedicatedAllocation.m_MemoryTypeIndex;
    7381  default:
    7382  VMA_ASSERT(0);
    7383  return UINT32_MAX;
    7384  }
    7385 }
    7386 
    7387 void* VmaAllocation_T::GetMappedData() const
    7388 {
    7389  switch(m_Type)
    7390  {
    7391  case ALLOCATION_TYPE_BLOCK:
    7392  if(m_MapCount != 0)
    7393  {
    7394  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
    7395  VMA_ASSERT(pBlockData != VMA_NULL);
    7396  return (char*)pBlockData + m_BlockAllocation.m_Offset;
    7397  }
    7398  else
    7399  {
    7400  return VMA_NULL;
    7401  }
    7402  break;
    7403  case ALLOCATION_TYPE_DEDICATED:
    7404  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
    7405  return m_DedicatedAllocation.m_pMappedData;
    7406  default:
    7407  VMA_ASSERT(0);
    7408  return VMA_NULL;
    7409  }
    7410 }
    7411 
    7412 bool VmaAllocation_T::CanBecomeLost() const
    7413 {
    7414  switch(m_Type)
    7415  {
    7416  case ALLOCATION_TYPE_BLOCK:
    7417  return m_BlockAllocation.m_CanBecomeLost;
    7418  case ALLOCATION_TYPE_DEDICATED:
    7419  return false;
    7420  default:
    7421  VMA_ASSERT(0);
    7422  return false;
    7423  }
    7424 }
    7425 
// Tries to atomically transition this allocation to the lost state.
// Returns true on success; returns false if the allocation was used within
// the last frameInUseCount frames relative to currentFrameIndex, or is
// already lost (the latter also asserts).
bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    VMA_ASSERT(CanBecomeLost());

    /*
    Warning: This is a carefully designed algorithm.
    Do not modify unless you really know what you're doing :)
    */
    uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
    for(;;)
    {
        if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
        {
            // Already lost - caller should not have asked again.
            VMA_ASSERT(0);
            return false;
        }
        else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
        {
            // Still considered in use by recent frames - must not be evicted.
            return false;
        }
        else // Last use time earlier than current time.
        {
            // NOTE(review): loop termination relies on
            // CompareExchangeLastUseFrameIndex having compare-exchange
            // semantics that refresh localLastUseFrameIndex on failure -
            // confirm at its definition.
            if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
            {
                // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
                // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
                return true;
            }
        }
    }
}
    7457 
    7458 #if VMA_STATS_STRING_ENABLED
    7459 
    7460 // Correspond to values of enum VmaSuballocationType.
// Printable names indexed by VmaSuballocationType - order and count must stay
// in sync with that enum.
static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    "FREE",
    "UNKNOWN",
    "BUFFER",
    "IMAGE_UNKNOWN",
    "IMAGE_LINEAR",
    "IMAGE_OPTIMAL",
};
    7469 
// Writes this allocation's parameters as key/value pairs into an already-open
// JSON object. Available only when VMA_STATS_STRING_ENABLED.
void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
{
    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);

    json.WriteString("Size");
    json.WriteNumber(m_Size);

    if(m_pUserData != VMA_NULL)
    {
        json.WriteString("UserData");
        if(IsUserDataString())
        {
            // User data is an owned string - print its text.
            json.WriteString((const char*)m_pUserData);
        }
        else
        {
            // Opaque pointer - print its address.
            json.BeginString();
            json.ContinueString_Pointer(m_pUserData);
            json.EndString();
        }
    }

    json.WriteString("CreationFrameIndex");
    json.WriteNumber(m_CreationFrameIndex);

    json.WriteString("LastUseFrameIndex");
    json.WriteNumber(GetLastUseFrameIndex());

    // Usage is only printed when a buffer/image usage mask was recorded.
    if(m_BufferImageUsage != 0)
    {
        json.WriteString("Usage");
        json.WriteNumber(m_BufferImageUsage);
    }
}
    7505 
    7506 #endif
    7507 
    7508 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
    7509 {
    7510  VMA_ASSERT(IsUserDataString());
    7511  if(m_pUserData != VMA_NULL)
    7512  {
    7513  char* const oldStr = (char*)m_pUserData;
    7514  const size_t oldStrLen = strlen(oldStr);
    7515  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
    7516  m_pUserData = VMA_NULL;
    7517  }
    7518 }
    7519 
    7520 void VmaAllocation_T::BlockAllocMap()
    7521 {
    7522  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    7523 
    7524  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    7525  {
    7526  ++m_MapCount;
    7527  }
    7528  else
    7529  {
    7530  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
    7531  }
    7532 }
    7533 
    7534 void VmaAllocation_T::BlockAllocUnmap()
    7535 {
    7536  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    7537 
    7538  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    7539  {
    7540  --m_MapCount;
    7541  }
    7542  else
    7543  {
    7544  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
    7545  }
    7546 }
    7547 
    7548 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
    7549 {
    7550  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
    7551 
    7552  if(m_MapCount != 0)
    7553  {
    7554  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    7555  {
    7556  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
    7557  *ppData = m_DedicatedAllocation.m_pMappedData;
    7558  ++m_MapCount;
    7559  return VK_SUCCESS;
    7560  }
    7561  else
    7562  {
    7563  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
    7564  return VK_ERROR_MEMORY_MAP_FAILED;
    7565  }
    7566  }
    7567  else
    7568  {
    7569  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
    7570  hAllocator->m_hDevice,
    7571  m_DedicatedAllocation.m_hMemory,
    7572  0, // offset
    7573  VK_WHOLE_SIZE,
    7574  0, // flags
    7575  ppData);
    7576  if(result == VK_SUCCESS)
    7577  {
    7578  m_DedicatedAllocation.m_pMappedData = *ppData;
    7579  m_MapCount = 1;
    7580  }
    7581  return result;
    7582  }
    7583 }
    7584 
    7585 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
    7586 {
    7587  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
    7588 
    7589  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    7590  {
    7591  --m_MapCount;
    7592  if(m_MapCount == 0)
    7593  {
    7594  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
    7595  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
    7596  hAllocator->m_hDevice,
    7597  m_DedicatedAllocation.m_hMemory);
    7598  }
    7599  }
    7600  else
    7601  {
    7602  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
    7603  }
    7604 }
    7605 
    7606 #if VMA_STATS_STRING_ENABLED
    7607 
// Serializes one VmaStatInfo as a JSON object. Min/Avg/Max sub-objects are
// only emitted when there is more than one allocation / unused range, since
// with a single sample they would just repeat the totals.
static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
{
    json.BeginObject();

    json.WriteString("Blocks");
    json.WriteNumber(stat.blockCount);

    json.WriteString("Allocations");
    json.WriteNumber(stat.allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber(stat.unusedRangeCount);

    json.WriteString("UsedBytes");
    json.WriteNumber(stat.usedBytes);

    json.WriteString("UnusedBytes");
    json.WriteNumber(stat.unusedBytes);

    if(stat.allocationCount > 1)
    {
        json.WriteString("AllocationSize");
        json.BeginObject(true); // single-line object
        json.WriteString("Min");
        json.WriteNumber(stat.allocationSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.allocationSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.allocationSizeMax);
        json.EndObject();
    }

    if(stat.unusedRangeCount > 1)
    {
        json.WriteString("UnusedRangeSize");
        json.BeginObject(true); // single-line object
        json.WriteString("Min");
        json.WriteNumber(stat.unusedRangeSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.unusedRangeSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.unusedRangeSizeMax);
        json.EndObject();
    }

    json.EndObject();
}
    7655 
    7656 #endif // #if VMA_STATS_STRING_ENABLED
    7657 
// Comparator ordering suballocation-list iterators by the size of the
// suballocation they point to. The heterogeneous overload against a plain
// VkDeviceSize supports binary searches over the sorted free-list.
struct VmaSuballocationItemSizeLess
{
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        const VmaSuballocationList::iterator rhs) const
    {
        return lhs->size < rhs->size;
    }
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        VkDeviceSize rhsSize) const
    {
        return lhs->size < rhsSize;
    }
};
    7673 
    7674 
    7676 // class VmaBlockMetadata
    7677 
// Base metadata starts with size 0; Init(size) sets the real block size later.
VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
    m_Size(0),
    m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
{
}
    7683 
    7684 #if VMA_STATS_STRING_ENABLED
    7685 
// Opens the JSON object describing one memory block: writes the summary
// fields and begins the "Suballocations" array. Must be paired with
// PrintDetailedMap_End.
void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
    VkDeviceSize unusedBytes,
    size_t allocationCount,
    size_t unusedRangeCount) const
{
    json.BeginObject();

    json.WriteString("TotalBytes");
    json.WriteNumber(GetSize());

    json.WriteString("UnusedBytes");
    json.WriteNumber(unusedBytes);

    json.WriteString("Allocations");
    json.WriteNumber((uint64_t)allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber((uint64_t)unusedRangeCount);

    json.WriteString("Suballocations");
    json.BeginArray();
}
    7708 
// Writes one occupied suballocation as a single-line JSON object inside the
// "Suballocations" array opened by PrintDetailedMap_Begin.
void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
    VkDeviceSize offset,
    VmaAllocation hAllocation) const
{
    json.BeginObject(true);

    json.WriteString("Offset");
    json.WriteNumber(offset);

    // Type/Size/UserData etc. come from the allocation itself.
    hAllocation->PrintParameters(json);

    json.EndObject();
}
    7722 
// Writes one free range as a single-line JSON object inside the
// "Suballocations" array opened by PrintDetailedMap_Begin.
void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
    VkDeviceSize offset,
    VkDeviceSize size) const
{
    json.BeginObject(true);

    json.WriteString("Offset");
    json.WriteNumber(offset);

    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);

    json.WriteString("Size");
    json.WriteNumber(size);

    json.EndObject();
}
    7740 
// Closes the "Suballocations" array and the block object opened by
// PrintDetailedMap_Begin.
void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
{
    json.EndArray();
    json.EndObject();
}
    7746 
    7747 #endif // #if VMA_STATS_STRING_ENABLED
    7748 
    7750 // class VmaBlockMetadata_Generic
    7751 
// Starts empty; Init(size) creates the initial whole-block free suballocation.
VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_FreeCount(0),
    m_SumFreeSize(0),
    m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
{
}
    7760 
// Containers clean themselves up; nothing else to release.
VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
{
}
    7764 
    7765 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
    7766 {
    7767  VmaBlockMetadata::Init(size);
    7768 
    7769  m_FreeCount = 1;
    7770  m_SumFreeSize = size;
    7771 
    7772  VmaSuballocation suballoc = {};
    7773  suballoc.offset = 0;
    7774  suballoc.size = size;
    7775  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    7776  suballoc.hAllocation = VK_NULL_HANDLE;
    7777 
    7778  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    7779  m_Suballocations.push_back(suballoc);
    7780  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
    7781  --suballocItem;
    7782  m_FreeSuballocationsBySize.push_back(suballocItem);
    7783 }
    7784 
// Checks internal consistency: suballocation ordering/merging invariants, the
// sorted free-list contents, and the cached totals. Implemented with
// VMA_VALIDATE, which returns false from this function on the first violated
// invariant; returns true only if everything is consistent.
bool VmaBlockMetadata_Generic::Validate() const
{
    VMA_VALIDATE(!m_Suballocations.empty());

    // Expected offset of new suballocation as calculated from previous ones.
    VkDeviceSize calculatedOffset = 0;
    // Expected number of free suballocations as calculated from traversing their list.
    uint32_t calculatedFreeCount = 0;
    // Expected sum size of free suballocations as calculated from traversing their list.
    VkDeviceSize calculatedSumFreeSize = 0;
    // Expected number of free suballocations that should be registered in
    // m_FreeSuballocationsBySize calculated from traversing their list.
    size_t freeSuballocationsToRegister = 0;
    // True if previous visited suballocation was free.
    bool prevFree = false;

    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem)
    {
        const VmaSuballocation& subAlloc = *suballocItem;

        // Actual offset of this suballocation doesn't match expected one.
        VMA_VALIDATE(subAlloc.offset == calculatedOffset);

        const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
        // Two adjacent free suballocations are invalid. They should be merged.
        VMA_VALIDATE(!prevFree || !currFree);

        // A suballocation is free exactly when no VmaAllocation is attached.
        VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));

        if(currFree)
        {
            calculatedSumFreeSize += subAlloc.size;
            ++calculatedFreeCount;
            // Only free ranges of at least this size are tracked in the
            // sorted free-list.
            if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
            {
                ++freeSuballocationsToRegister;
            }

            // Margin required between allocations - every free space must be at least that large.
            VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
        }
        else
        {
            // The attached allocation must agree with this range's placement.
            VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
            VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);

            // Margin required between allocations - previous allocation must be free.
            VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
        }

        calculatedOffset += subAlloc.size;
        prevFree = currFree;
    }

    // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    // match expected one.
    VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);

    VkDeviceSize lastSize = 0;
    for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    {
        VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];

        // Only free suballocations can be registered in m_FreeSuballocationsBySize.
        VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
        // They must be sorted by size ascending.
        VMA_VALIDATE(suballocItem->size >= lastSize);

        lastSize = suballocItem->size;
    }

    // Check if totals match calculated values.
    VMA_VALIDATE(ValidateFreeSuballocationList());
    VMA_VALIDATE(calculatedOffset == GetSize());
    VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
    VMA_VALIDATE(calculatedFreeCount == m_FreeCount);

    return true;
}
    7866 
    7867 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
    7868 {
    7869  if(!m_FreeSuballocationsBySize.empty())
    7870  {
    7871  return m_FreeSuballocationsBySize.back()->size;
    7872  }
    7873  else
    7874  {
    7875  return 0;
    7876  }
    7877 }
    7878 
    7879 bool VmaBlockMetadata_Generic::IsEmpty() const
    7880 {
    7881  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
    7882 }
    7883 
    7884 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    7885 {
    7886  outInfo.blockCount = 1;
    7887 
    7888  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    7889  outInfo.allocationCount = rangeCount - m_FreeCount;
    7890  outInfo.unusedRangeCount = m_FreeCount;
    7891 
    7892  outInfo.unusedBytes = m_SumFreeSize;
    7893  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
    7894 
    7895  outInfo.allocationSizeMin = UINT64_MAX;
    7896  outInfo.allocationSizeMax = 0;
    7897  outInfo.unusedRangeSizeMin = UINT64_MAX;
    7898  outInfo.unusedRangeSizeMax = 0;
    7899 
    7900  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    7901  suballocItem != m_Suballocations.cend();
    7902  ++suballocItem)
    7903  {
    7904  const VmaSuballocation& suballoc = *suballocItem;
    7905  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    7906  {
    7907  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    7908  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
    7909  }
    7910  else
    7911  {
    7912  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
    7913  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
    7914  }
    7915  }
    7916 }
    7917 
    7918 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
    7919 {
    7920  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    7921 
    7922  inoutStats.size += GetSize();
    7923  inoutStats.unusedSize += m_SumFreeSize;
    7924  inoutStats.allocationCount += rangeCount - m_FreeCount;
    7925  inoutStats.unusedRangeCount += m_FreeCount;
    7926  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    7927 }
    7928 
    7929 #if VMA_STATS_STRING_ENABLED
    7930 
    7931 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
    7932 {
    7933  PrintDetailedMap_Begin(json,
    7934  m_SumFreeSize, // unusedBytes
    7935  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
    7936  m_FreeCount); // unusedRangeCount
    7937 
    7938  size_t i = 0;
    7939  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    7940  suballocItem != m_Suballocations.cend();
    7941  ++suballocItem, ++i)
    7942  {
    7943  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    7944  {
    7945  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
    7946  }
    7947  else
    7948  {
    7949  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
    7950  }
    7951  }
    7952 
    7953  PrintDetailedMap_End(json);
    7954 }
    7955 
    7956 #endif // #if VMA_STATS_STRING_ENABLED
    7957 
    7958 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
    7959  uint32_t currentFrameIndex,
    7960  uint32_t frameInUseCount,
    7961  VkDeviceSize bufferImageGranularity,
    7962  VkDeviceSize allocSize,
    7963  VkDeviceSize allocAlignment,
    7964  bool upperAddress,
    7965  VmaSuballocationType allocType,
    7966  bool canMakeOtherLost,
    7967  uint32_t strategy,
    7968  VmaAllocationRequest* pAllocationRequest)
    7969 {
    7970  VMA_ASSERT(allocSize > 0);
    7971  VMA_ASSERT(!upperAddress);
    7972  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    7973  VMA_ASSERT(pAllocationRequest != VMA_NULL);
    7974  VMA_HEAVY_ASSERT(Validate());
    7975 
    7976  pAllocationRequest->type = VmaAllocationRequestType::Normal;
    7977 
    7978  // There is not enough total free space in this block to fullfill the request: Early return.
    7979  if(canMakeOtherLost == false &&
    7980  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
    7981  {
    7982  return false;
    7983  }
    7984 
    7985  // New algorithm, efficiently searching freeSuballocationsBySize.
    7986  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
    7987  if(freeSuballocCount > 0)
    7988  {
    7990  {
    7991  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
    7992  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    7993  m_FreeSuballocationsBySize.data(),
    7994  m_FreeSuballocationsBySize.data() + freeSuballocCount,
    7995  allocSize + 2 * VMA_DEBUG_MARGIN,
    7996  VmaSuballocationItemSizeLess());
    7997  size_t index = it - m_FreeSuballocationsBySize.data();
    7998  for(; index < freeSuballocCount; ++index)
    7999  {
    8000  if(CheckAllocation(
    8001  currentFrameIndex,
    8002  frameInUseCount,
    8003  bufferImageGranularity,
    8004  allocSize,
    8005  allocAlignment,
    8006  allocType,
    8007  m_FreeSuballocationsBySize[index],
    8008  false, // canMakeOtherLost
    8009  &pAllocationRequest->offset,
    8010  &pAllocationRequest->itemsToMakeLostCount,
    8011  &pAllocationRequest->sumFreeSize,
    8012  &pAllocationRequest->sumItemSize))
    8013  {
    8014  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    8015  return true;
    8016  }
    8017  }
    8018  }
    8019  else if(strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
    8020  {
    8021  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    8022  it != m_Suballocations.end();
    8023  ++it)
    8024  {
    8025  if(it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
    8026  currentFrameIndex,
    8027  frameInUseCount,
    8028  bufferImageGranularity,
    8029  allocSize,
    8030  allocAlignment,
    8031  allocType,
    8032  it,
    8033  false, // canMakeOtherLost
    8034  &pAllocationRequest->offset,
    8035  &pAllocationRequest->itemsToMakeLostCount,
    8036  &pAllocationRequest->sumFreeSize,
    8037  &pAllocationRequest->sumItemSize))
    8038  {
    8039  pAllocationRequest->item = it;
    8040  return true;
    8041  }
    8042  }
    8043  }
    8044  else // WORST_FIT, FIRST_FIT
    8045  {
    8046  // Search staring from biggest suballocations.
    8047  for(size_t index = freeSuballocCount; index--; )
    8048  {
    8049  if(CheckAllocation(
    8050  currentFrameIndex,
    8051  frameInUseCount,
    8052  bufferImageGranularity,
    8053  allocSize,
    8054  allocAlignment,
    8055  allocType,
    8056  m_FreeSuballocationsBySize[index],
    8057  false, // canMakeOtherLost
    8058  &pAllocationRequest->offset,
    8059  &pAllocationRequest->itemsToMakeLostCount,
    8060  &pAllocationRequest->sumFreeSize,
    8061  &pAllocationRequest->sumItemSize))
    8062  {
    8063  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    8064  return true;
    8065  }
    8066  }
    8067  }
    8068  }
    8069 
    8070  if(canMakeOtherLost)
    8071  {
    8072  // Brute-force algorithm. TODO: Come up with something better.
    8073 
    8074  bool found = false;
    8075  VmaAllocationRequest tmpAllocRequest = {};
    8076  tmpAllocRequest.type = VmaAllocationRequestType::Normal;
    8077  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
    8078  suballocIt != m_Suballocations.end();
    8079  ++suballocIt)
    8080  {
    8081  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
    8082  suballocIt->hAllocation->CanBecomeLost())
    8083  {
    8084  if(CheckAllocation(
    8085  currentFrameIndex,
    8086  frameInUseCount,
    8087  bufferImageGranularity,
    8088  allocSize,
    8089  allocAlignment,
    8090  allocType,
    8091  suballocIt,
    8092  canMakeOtherLost,
    8093  &tmpAllocRequest.offset,
    8094  &tmpAllocRequest.itemsToMakeLostCount,
    8095  &tmpAllocRequest.sumFreeSize,
    8096  &tmpAllocRequest.sumItemSize))
    8097  {
    8099  {
    8100  *pAllocationRequest = tmpAllocRequest;
    8101  pAllocationRequest->item = suballocIt;
    8102  break;
    8103  }
    8104  if(!found || tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
    8105  {
    8106  *pAllocationRequest = tmpAllocRequest;
    8107  pAllocationRequest->item = suballocIt;
    8108  found = true;
    8109  }
    8110  }
    8111  }
    8112  }
    8113 
    8114  return found;
    8115  }
    8116 
    8117  return false;
    8118 }
    8119 
bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    // Makes lost the allocations that were counted in
    // pAllocationRequest->itemsToMakeLostCount, so the region chosen by
    // CreateAllocationRequest/CheckAllocation actually becomes free.
    // Returns false if any of those allocations can no longer be made lost.
    VMA_ASSERT(pAllocationRequest && pAllocationRequest->type == VmaAllocationRequestType::Normal);

    while(pAllocationRequest->itemsToMakeLostCount > 0)
    {
        // Skip over a free suballocation to reach the next used one.
        if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            ++pAllocationRequest->item;
        }
        VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
        VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
        VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
        if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            // FreeSuballocation may merge with free neighbors; continue from
            // the iterator it returns (the resulting free range).
            pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
            --pAllocationRequest->itemsToMakeLostCount;
        }
        else
        {
            return false;
        }
    }

    VMA_HEAVY_ASSERT(Validate());
    VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);

    return true;
}
    8153 
uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    // Tries to make lost every allocation in this block that reports
    // CanBecomeLost() and for which MakeLost() succeeds.
    // Returns the number of allocations actually made lost.
    uint32_t lostAllocationCount = 0;
    for(VmaSuballocationList::iterator it = m_Suballocations.begin();
        it != m_Suballocations.end();
        ++it)
    {
        if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
            it->hAllocation->CanBecomeLost() &&
            it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            // FreeSuballocation may merge neighbors and returns the iterator
            // to the resulting free range; resume iteration from there.
            it = FreeSuballocation(it);
            ++lostAllocationCount;
        }
    }
    return lostAllocationCount;
}
    8171 
    8172 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
    8173 {
    8174  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    8175  it != m_Suballocations.end();
    8176  ++it)
    8177  {
    8178  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
    8179  {
    8180  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
    8181  {
    8182  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    8183  return VK_ERROR_VALIDATION_FAILED_EXT;
    8184  }
    8185  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
    8186  {
    8187  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    8188  return VK_ERROR_VALIDATION_FAILED_EXT;
    8189  }
    8190  }
    8191  }
    8192 
    8193  return VK_SUCCESS;
    8194 }
    8195 
void VmaBlockMetadata_Generic::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    VmaAllocation hAllocation)
{
    // Commits a previously validated allocation request: converts the free
    // suballocation at request.item into a used one of exactly allocSize
    // bytes at request.offset, carving leftover space before/after into new
    // free suballocations and updating the block's totals.
    VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
    VMA_ASSERT(request.item != m_Suballocations.end());
    VmaSuballocation& suballoc = *request.item;
    // Given suballocation is a free block.
    VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    // Given offset is inside this suballocation.
    VMA_ASSERT(request.offset >= suballoc.offset);
    const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;

    // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    // it to become used.
    UnregisterFreeSuballocation(request.item);

    suballoc.offset = request.offset;
    suballoc.size = allocSize;
    suballoc.type = type;
    suballoc.hAllocation = hAllocation;

    // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    if(paddingEnd)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset + allocSize;
        paddingSuballoc.size = paddingEnd;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        VmaSuballocationList::iterator next = request.item;
        ++next;
        const VmaSuballocationList::iterator paddingEndItem =
            m_Suballocations.insert(next, paddingSuballoc);
        RegisterFreeSuballocation(paddingEndItem);
    }

    // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    if(paddingBegin)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset - paddingBegin;
        paddingSuballoc.size = paddingBegin;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        const VmaSuballocationList::iterator paddingBeginItem =
            m_Suballocations.insert(request.item, paddingSuballoc);
        RegisterFreeSuballocation(paddingBeginItem);
    }

    // Update totals.
    // One free range was consumed; each nonzero padding re-adds one free range.
    m_FreeCount = m_FreeCount - 1;
    if(paddingBegin > 0)
    {
        ++m_FreeCount;
    }
    if(paddingEnd > 0)
    {
        ++m_FreeCount;
    }
    // Only allocSize becomes used; the paddings remain counted as free bytes.
    m_SumFreeSize -= allocSize;
}
    8260 
    8261 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
    8262 {
    8263  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    8264  suballocItem != m_Suballocations.end();
    8265  ++suballocItem)
    8266  {
    8267  VmaSuballocation& suballoc = *suballocItem;
    8268  if(suballoc.hAllocation == allocation)
    8269  {
    8270  FreeSuballocation(suballocItem);
    8271  VMA_HEAVY_ASSERT(Validate());
    8272  return;
    8273  }
    8274  }
    8275  VMA_ASSERT(0 && "Not found!");
    8276 }
    8277 
    8278 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
    8279 {
    8280  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    8281  suballocItem != m_Suballocations.end();
    8282  ++suballocItem)
    8283  {
    8284  VmaSuballocation& suballoc = *suballocItem;
    8285  if(suballoc.offset == offset)
    8286  {
    8287  FreeSuballocation(suballocItem);
    8288  return;
    8289  }
    8290  }
    8291  VMA_ASSERT(0 && "Not found!");
    8292 }
    8293 
bool VmaBlockMetadata_Generic::ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize)
{
    // Tries to change the size of an existing allocation in place.
    // Shrinking always succeeds: the freed tail grows the following free
    // suballocation backward, or becomes a new free suballocation.
    // Growing succeeds only if the immediately following suballocation is
    // free and large enough (including VMA_DEBUG_MARGIN).
    // Returns true on success; false (after asserting) when growth is
    // impossible or the allocation is not found in this block.
    typedef VmaSuballocationList::iterator iter_type;
    for(iter_type suballocItem = m_Suballocations.begin();
        suballocItem != m_Suballocations.end();
        ++suballocItem)
    {
        VmaSuballocation& suballoc = *suballocItem;
        if(suballoc.hAllocation == alloc)
        {
            iter_type nextItem = suballocItem;
            ++nextItem;

            // Should have been ensured on higher level.
            VMA_ASSERT(newSize != alloc->GetSize() && newSize > 0);

            // Shrinking.
            if(newSize < alloc->GetSize())
            {
                const VkDeviceSize sizeDiff = suballoc.size - newSize;

                // There is next item.
                if(nextItem != m_Suballocations.end())
                {
                    // Next item is free.
                    if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                    {
                        // Grow this next item backward.
                        // Must unregister before mutating size: the registry is sorted by size.
                        UnregisterFreeSuballocation(nextItem);
                        nextItem->offset -= sizeDiff;
                        nextItem->size += sizeDiff;
                        RegisterFreeSuballocation(nextItem);
                    }
                    // Next item is not free.
                    else
                    {
                        // Create free item after current one.
                        VmaSuballocation newFreeSuballoc;
                        newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
                        newFreeSuballoc.offset = suballoc.offset + newSize;
                        newFreeSuballoc.size = sizeDiff;
                        newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                        iter_type newFreeSuballocIt = m_Suballocations.insert(nextItem, newFreeSuballoc);
                        RegisterFreeSuballocation(newFreeSuballocIt);

                        ++m_FreeCount;
                    }
                }
                // This is the last item.
                else
                {
                    // Create free item at the end.
                    VmaSuballocation newFreeSuballoc;
                    newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
                    newFreeSuballoc.offset = suballoc.offset + newSize;
                    newFreeSuballoc.size = sizeDiff;
                    newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                    m_Suballocations.push_back(newFreeSuballoc);

                    iter_type newFreeSuballocIt = m_Suballocations.end();
                    RegisterFreeSuballocation(--newFreeSuballocIt);

                    ++m_FreeCount;
                }

                suballoc.size = newSize;
                m_SumFreeSize += sizeDiff;
            }
            // Growing.
            else
            {
                const VkDeviceSize sizeDiff = newSize - suballoc.size;

                // There is next item.
                if(nextItem != m_Suballocations.end())
                {
                    // Next item is free.
                    if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                    {
                        // There is not enough free space, including margin.
                        if(nextItem->size < sizeDiff + VMA_DEBUG_MARGIN)
                        {
                            return false;
                        }

                        // There is more free space than required.
                        if(nextItem->size > sizeDiff)
                        {
                            // Move and shrink this next item.
                            UnregisterFreeSuballocation(nextItem);
                            nextItem->offset += sizeDiff;
                            nextItem->size -= sizeDiff;
                            RegisterFreeSuballocation(nextItem);
                        }
                        // There is exactly the amount of free space required.
                        else
                        {
                            // Remove this next free item.
                            UnregisterFreeSuballocation(nextItem);
                            m_Suballocations.erase(nextItem);
                            --m_FreeCount;
                        }
                    }
                    // Next item is not free - there is no space to grow.
                    else
                    {
                        return false;
                    }
                }
                // This is the last item - there is no space to grow.
                else
                {
                    return false;
                }

                suballoc.size = newSize;
                m_SumFreeSize -= sizeDiff;
            }

            // We cannot call Validate() here because alloc object is updated to new size outside of this call.
            return true;
        }
    }
    VMA_ASSERT(0 && "Not found!");
    return false;
}
    8420 
    8421 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
    8422 {
    8423  VkDeviceSize lastSize = 0;
    8424  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
    8425  {
    8426  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
    8427 
    8428  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
    8429  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    8430  VMA_VALIDATE(it->size >= lastSize);
    8431  lastSize = it->size;
    8432  }
    8433  return true;
    8434 }
    8435 
bool VmaBlockMetadata_Generic::CheckAllocation(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    VmaSuballocationList::const_iterator suballocItem,
    bool canMakeOtherLost,
    VkDeviceSize* pOffset,
    size_t* itemsToMakeLostCount,
    VkDeviceSize* pSumFreeSize,
    VkDeviceSize* pSumItemSize) const
{
    // Checks whether an allocation of allocSize / allocAlignment / allocType
    // can be placed starting at suballocItem.
    // - canMakeOtherLost == false: suballocItem must be a single free range
    //   that fits the request (with VMA_DEBUG_MARGIN on both sides).
    // - canMakeOtherLost == true: following suballocations may also be
    //   consumed if their allocations can be made lost and are old enough
    //   (last use + frameInUseCount < currentFrameIndex).
    // Outputs on success: *pOffset = final aligned offset;
    // *itemsToMakeLostCount / *pSumItemSize = count and total size of
    // allocations that would have to be made lost; *pSumFreeSize = free bytes
    // covered. Returns true if the allocation can be made here.
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(suballocItem != m_Suballocations.cend());
    VMA_ASSERT(pOffset != VMA_NULL);

    *itemsToMakeLostCount = 0;
    *pSumFreeSize = 0;
    *pSumItemSize = 0;

    if(canMakeOtherLost)
    {
        if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            *pSumFreeSize = suballocItem->size;
        }
        else
        {
            // Starting suballocation is used: it must itself be lost-able and old enough.
            if(suballocItem->hAllocation->CanBecomeLost() &&
                suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
            {
                ++*itemsToMakeLostCount;
                *pSumItemSize = suballocItem->size;
            }
            else
            {
                return false;
            }
        }

        // Remaining size is too small for this request: Early return.
        if(GetSize() - suballocItem->offset < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballocItem->offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                // Conflicting neighbor on the same page: push the offset to the next granularity boundary.
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Now that we have final *pOffset, check if we are past suballocItem.
        // If yes, return false - this function should be called for another suballocItem as starting point.
        if(*pOffset >= suballocItem->offset + suballocItem->size)
        {
            return false;
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
        // Another early return check.
        if(suballocItem->offset + totalSize > GetSize())
        {
            return false;
        }

        // Advance lastSuballocItem until desired size is reached.
        // Update itemsToMakeLostCount.
        VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
        if(totalSize > suballocItem->size)
        {
            VkDeviceSize remainingSize = totalSize - suballocItem->size;
            while(remainingSize > 0)
            {
                ++lastSuballocItem;
                if(lastSuballocItem == m_Suballocations.cend())
                {
                    return false;
                }
                if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                {
                    *pSumFreeSize += lastSuballocItem->size;
                }
                else
                {
                    VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
                    if(lastSuballocItem->hAllocation->CanBecomeLost() &&
                        lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                    {
                        ++*itemsToMakeLostCount;
                        *pSumItemSize += lastSuballocItem->size;
                    }
                    else
                    {
                        return false;
                    }
                }
                remainingSize = (lastSuballocItem->size < remainingSize) ?
                    remainingSize - lastSuballocItem->size : 0;
            }
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, we must mark more allocations lost or fail.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
                        if(nextSuballoc.hAllocation->CanBecomeLost() &&
                            nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++*itemsToMakeLostCount;
                        }
                        else
                        {
                            return false;
                        }
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }
    else
    {
        const VmaSuballocation& suballoc = *suballocItem;
        VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        *pSumFreeSize = suballoc.size;

        // Size of this suballocation is too small for this request: Early return.
        if(suballoc.size < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballoc.offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                // Conflicting neighbor on the same page: push the offset to the next granularity boundary.
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        // Fail if requested size plus margin before and after is bigger than size of this suballocation.
        if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
        {
            return false;
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, allocation cannot be made here.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        return false;
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }

    // All tests passed: Success. pOffset is already filled.
    return true;
}
    8709 
    8710 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
    8711 {
    8712  VMA_ASSERT(item != m_Suballocations.end());
    8713  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    8714 
    8715  VmaSuballocationList::iterator nextItem = item;
    8716  ++nextItem;
    8717  VMA_ASSERT(nextItem != m_Suballocations.end());
    8718  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
    8719 
    8720  item->size += nextItem->size;
    8721  --m_FreeCount;
    8722  m_Suballocations.erase(nextItem);
    8723 }
    8724 
VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
{
    // Marks the given used suballocation as free, updates block totals, and
    // coalesces it with adjacent free neighbors. Returns an iterator to the
    // resulting (possibly merged) free suballocation.
    // Change this suballocation to be marked as free.
    VmaSuballocation& suballoc = *suballocItem;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    suballoc.hAllocation = VK_NULL_HANDLE;

    // Update totals.
    ++m_FreeCount;
    m_SumFreeSize += suballoc.size;

    // Merge with previous and/or next suballocation if it's also free.
    bool mergeWithNext = false;
    bool mergeWithPrev = false;

    VmaSuballocationList::iterator nextItem = suballocItem;
    ++nextItem;
    if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    {
        mergeWithNext = true;
    }

    VmaSuballocationList::iterator prevItem = suballocItem;
    if(suballocItem != m_Suballocations.begin())
    {
        --prevItem;
        if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            mergeWithPrev = true;
        }
    }

    if(mergeWithNext)
    {
        // The next item must leave the size-sorted registry before it is absorbed.
        UnregisterFreeSuballocation(nextItem);
        MergeFreeWithNext(suballocItem);
    }

    if(mergeWithPrev)
    {
        // The previous free item absorbs this one (and possibly the next);
        // re-register it under its new, larger size.
        UnregisterFreeSuballocation(prevItem);
        MergeFreeWithNext(prevItem);
        RegisterFreeSuballocation(prevItem);
        return prevItem;
    }
    else
    {
        RegisterFreeSuballocation(suballocItem);
        return suballocItem;
    }
}
    8776 
    8777 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
    8778 {
    8779  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    8780  VMA_ASSERT(item->size > 0);
    8781 
    8782  // You may want to enable this validation at the beginning or at the end of
    8783  // this function, depending on what do you want to check.
    8784  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    8785 
    8786  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    8787  {
    8788  if(m_FreeSuballocationsBySize.empty())
    8789  {
    8790  m_FreeSuballocationsBySize.push_back(item);
    8791  }
    8792  else
    8793  {
    8794  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
    8795  }
    8796  }
    8797 
    8798  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    8799 }
    8800 
    8801 
    8802 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
    8803 {
    8804  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    8805  VMA_ASSERT(item->size > 0);
    8806 
    8807  // You may want to enable this validation at the beginning or at the end of
    8808  // this function, depending on what do you want to check.
    8809  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    8810 
    8811  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    8812  {
    8813  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    8814  m_FreeSuballocationsBySize.data(),
    8815  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
    8816  item,
    8817  VmaSuballocationItemSizeLess());
    8818  for(size_t index = it - m_FreeSuballocationsBySize.data();
    8819  index < m_FreeSuballocationsBySize.size();
    8820  ++index)
    8821  {
    8822  if(m_FreeSuballocationsBySize[index] == item)
    8823  {
    8824  VmaVectorRemove(m_FreeSuballocationsBySize, index);
    8825  return;
    8826  }
    8827  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
    8828  }
    8829  VMA_ASSERT(0 && "Not found.");
    8830  }
    8831 
    8832  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    8833 }
    8834 
    8835 bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible(
    8836  VkDeviceSize bufferImageGranularity,
    8837  VmaSuballocationType& inOutPrevSuballocType) const
    8838 {
    8839  if(bufferImageGranularity == 1 || IsEmpty())
    8840  {
    8841  return false;
    8842  }
    8843 
    8844  VkDeviceSize minAlignment = VK_WHOLE_SIZE;
    8845  bool typeConflictFound = false;
    8846  for(VmaSuballocationList::const_iterator it = m_Suballocations.cbegin();
    8847  it != m_Suballocations.cend();
    8848  ++it)
    8849  {
    8850  const VmaSuballocationType suballocType = it->type;
    8851  if(suballocType != VMA_SUBALLOCATION_TYPE_FREE)
    8852  {
    8853  minAlignment = VMA_MIN(minAlignment, it->hAllocation->GetAlignment());
    8854  if(VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType))
    8855  {
    8856  typeConflictFound = true;
    8857  }
    8858  inOutPrevSuballocType = suballocType;
    8859  }
    8860  }
    8861 
    8862  return typeConflictFound || minAlignment >= bufferImageGranularity;
    8863 }
    8864 
    8866 // class VmaBlockMetadata_Linear
    8867 
// Constructs an empty linear-algorithm metadata object. Both suballocation
// vectors share the allocator's host allocation callbacks. Starts with the
// 1st vector role assigned to m_Suballocations0 and the 2nd vector unused.
VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_SumFreeSize(0),
    m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_1stVectorIndex(0),
    m_2ndVectorMode(SECOND_VECTOR_EMPTY),
    m_1stNullItemsBeginCount(0),
    m_1stNullItemsMiddleCount(0),
    m_2ndNullItemsCount(0)
{
}
    8880 
// Nothing to release explicitly: the suballocation vectors free their storage
// through their VmaStlAllocator-backed destructors.
VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
{
}
    8884 
// Initializes metadata for a block of the given size. A fresh block is
// entirely free, so the free-size total equals the block size.
void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);
    m_SumFreeSize = size;
}
    8890 
// Consistency checker (debug builds): verifies the invariants of the linear
// metadata — vector/mode agreement, null-item accounting, monotonically
// increasing offsets in traversal order, agreement between each suballocation
// and its VmaAllocation, and the m_SumFreeSize total. Returns true on success;
// VMA_VALIDATE returns false from this function on the first violation.
bool VmaBlockMetadata_Linear::Validate() const
{
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    // 2nd vector is non-empty exactly when a second-vector mode is active.
    VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
    // A ring buffer cannot exist with an empty 1st vector while 2nd has items.
    VMA_VALIDATE(!suballocations1st.empty() ||
        suballocations2nd.empty() ||
        m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);

    if(!suballocations1st.empty())
    {
        // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
        VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
    }
    if(!suballocations2nd.empty())
    {
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
    }

    VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
    VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());

    VkDeviceSize sumUsedSize = 0;
    const size_t suballoc1stCount = suballocations1st.size();
    // Running lower bound for the next suballocation's offset.
    VkDeviceSize offset = VMA_DEBUG_MARGIN;

    // In ring-buffer mode the 2nd vector occupies the low offsets, so it is
    // walked before the 1st vector.
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = 0; i < suballoc2ndCount; ++i)
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            // Free entries must have a null handle and vice versa.
            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                // The VmaAllocation must agree with the metadata entry.
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    // Leading items of the 1st vector must all be freed (null) entries.
    for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
            suballoc.hAllocation == VK_NULL_HANDLE);
    }

    size_t nullItem1stCount = m_1stNullItemsBeginCount;

    // Walk the remainder of the 1st vector, counting freed middle items.
    for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
        VMA_VALIDATE(suballoc.offset >= offset);
        // NOTE(review): trivially true given the loop's start index; kept as-is.
        VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);

        if(!currFree)
        {
            VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
            VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
            sumUsedSize += suballoc.size;
        }
        else
        {
            ++nullItem1stCount;
        }

        offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    }
    VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);

    // In double-stack mode the 2nd vector grows downward from the top of the
    // block, so it is walked in reverse to keep offsets increasing.
    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = suballoc2ndCount; i--; )
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    VMA_VALIDATE(offset <= GetSize());
    VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);

    return true;
}
    9017 
    9018 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
    9019 {
    9020  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
    9021  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
    9022 }
    9023 
    9024 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
    9025 {
    9026  const VkDeviceSize size = GetSize();
    9027 
    9028  /*
    9029  We don't consider gaps inside allocation vectors with freed allocations because
    9030  they are not suitable for reuse in linear allocator. We consider only space that
    9031  is available for new allocations.
    9032  */
    9033  if(IsEmpty())
    9034  {
    9035  return size;
    9036  }
    9037 
    9038  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9039 
    9040  switch(m_2ndVectorMode)
    9041  {
    9042  case SECOND_VECTOR_EMPTY:
    9043  /*
    9044  Available space is after end of 1st, as well as before beginning of 1st (which
    9045  whould make it a ring buffer).
    9046  */
    9047  {
    9048  const size_t suballocations1stCount = suballocations1st.size();
    9049  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
    9050  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
    9051  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
    9052  return VMA_MAX(
    9053  firstSuballoc.offset,
    9054  size - (lastSuballoc.offset + lastSuballoc.size));
    9055  }
    9056  break;
    9057 
    9058  case SECOND_VECTOR_RING_BUFFER:
    9059  /*
    9060  Available space is only between end of 2nd and beginning of 1st.
    9061  */
    9062  {
    9063  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9064  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
    9065  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
    9066  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
    9067  }
    9068  break;
    9069 
    9070  case SECOND_VECTOR_DOUBLE_STACK:
    9071  /*
    9072  Available space is only between end of 1st and top of 2nd.
    9073  */
    9074  {
    9075  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9076  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
    9077  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
    9078  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
    9079  }
    9080  break;
    9081 
    9082  default:
    9083  VMA_ASSERT(0);
    9084  return 0;
    9085  }
    9086 }
    9087 
    9088 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    9089 {
    9090  const VkDeviceSize size = GetSize();
    9091  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9092  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9093  const size_t suballoc1stCount = suballocations1st.size();
    9094  const size_t suballoc2ndCount = suballocations2nd.size();
    9095 
    9096  outInfo.blockCount = 1;
    9097  outInfo.allocationCount = (uint32_t)GetAllocationCount();
    9098  outInfo.unusedRangeCount = 0;
    9099  outInfo.usedBytes = 0;
    9100  outInfo.allocationSizeMin = UINT64_MAX;
    9101  outInfo.allocationSizeMax = 0;
    9102  outInfo.unusedRangeSizeMin = UINT64_MAX;
    9103  outInfo.unusedRangeSizeMax = 0;
    9104 
    9105  VkDeviceSize lastOffset = 0;
    9106 
    9107  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    9108  {
    9109  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    9110  size_t nextAlloc2ndIndex = 0;
    9111  while(lastOffset < freeSpace2ndTo1stEnd)
    9112  {
    9113  // Find next non-null allocation or move nextAllocIndex to the end.
    9114  while(nextAlloc2ndIndex < suballoc2ndCount &&
    9115  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9116  {
    9117  ++nextAlloc2ndIndex;
    9118  }
    9119 
    9120  // Found non-null allocation.
    9121  if(nextAlloc2ndIndex < suballoc2ndCount)
    9122  {
    9123  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9124 
    9125  // 1. Process free space before this allocation.
    9126  if(lastOffset < suballoc.offset)
    9127  {
    9128  // There is free space from lastOffset to suballoc.offset.
    9129  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9130  ++outInfo.unusedRangeCount;
    9131  outInfo.unusedBytes += unusedRangeSize;
    9132  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9133  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9134  }
    9135 
    9136  // 2. Process this allocation.
    9137  // There is allocation with suballoc.offset, suballoc.size.
    9138  outInfo.usedBytes += suballoc.size;
    9139  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    9140  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    9141 
    9142  // 3. Prepare for next iteration.
    9143  lastOffset = suballoc.offset + suballoc.size;
    9144  ++nextAlloc2ndIndex;
    9145  }
    9146  // We are at the end.
    9147  else
    9148  {
    9149  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    9150  if(lastOffset < freeSpace2ndTo1stEnd)
    9151  {
    9152  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    9153  ++outInfo.unusedRangeCount;
    9154  outInfo.unusedBytes += unusedRangeSize;
    9155  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9156  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9157  }
    9158 
    9159  // End of loop.
    9160  lastOffset = freeSpace2ndTo1stEnd;
    9161  }
    9162  }
    9163  }
    9164 
    9165  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    9166  const VkDeviceSize freeSpace1stTo2ndEnd =
    9167  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    9168  while(lastOffset < freeSpace1stTo2ndEnd)
    9169  {
    9170  // Find next non-null allocation or move nextAllocIndex to the end.
    9171  while(nextAlloc1stIndex < suballoc1stCount &&
    9172  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    9173  {
    9174  ++nextAlloc1stIndex;
    9175  }
    9176 
    9177  // Found non-null allocation.
    9178  if(nextAlloc1stIndex < suballoc1stCount)
    9179  {
    9180  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    9181 
    9182  // 1. Process free space before this allocation.
    9183  if(lastOffset < suballoc.offset)
    9184  {
    9185  // There is free space from lastOffset to suballoc.offset.
    9186  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9187  ++outInfo.unusedRangeCount;
    9188  outInfo.unusedBytes += unusedRangeSize;
    9189  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9190  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9191  }
    9192 
    9193  // 2. Process this allocation.
    9194  // There is allocation with suballoc.offset, suballoc.size.
    9195  outInfo.usedBytes += suballoc.size;
    9196  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    9197  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    9198 
    9199  // 3. Prepare for next iteration.
    9200  lastOffset = suballoc.offset + suballoc.size;
    9201  ++nextAlloc1stIndex;
    9202  }
    9203  // We are at the end.
    9204  else
    9205  {
    9206  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    9207  if(lastOffset < freeSpace1stTo2ndEnd)
    9208  {
    9209  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    9210  ++outInfo.unusedRangeCount;
    9211  outInfo.unusedBytes += unusedRangeSize;
    9212  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9213  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9214  }
    9215 
    9216  // End of loop.
    9217  lastOffset = freeSpace1stTo2ndEnd;
    9218  }
    9219  }
    9220 
    9221  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    9222  {
    9223  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    9224  while(lastOffset < size)
    9225  {
    9226  // Find next non-null allocation or move nextAllocIndex to the end.
    9227  while(nextAlloc2ndIndex != SIZE_MAX &&
    9228  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9229  {
    9230  --nextAlloc2ndIndex;
    9231  }
    9232 
    9233  // Found non-null allocation.
    9234  if(nextAlloc2ndIndex != SIZE_MAX)
    9235  {
    9236  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9237 
    9238  // 1. Process free space before this allocation.
    9239  if(lastOffset < suballoc.offset)
    9240  {
    9241  // There is free space from lastOffset to suballoc.offset.
    9242  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9243  ++outInfo.unusedRangeCount;
    9244  outInfo.unusedBytes += unusedRangeSize;
    9245  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9246  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9247  }
    9248 
    9249  // 2. Process this allocation.
    9250  // There is allocation with suballoc.offset, suballoc.size.
    9251  outInfo.usedBytes += suballoc.size;
    9252  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    9253  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    9254 
    9255  // 3. Prepare for next iteration.
    9256  lastOffset = suballoc.offset + suballoc.size;
    9257  --nextAlloc2ndIndex;
    9258  }
    9259  // We are at the end.
    9260  else
    9261  {
    9262  // There is free space from lastOffset to size.
    9263  if(lastOffset < size)
    9264  {
    9265  const VkDeviceSize unusedRangeSize = size - lastOffset;
    9266  ++outInfo.unusedRangeCount;
    9267  outInfo.unusedBytes += unusedRangeSize;
    9268  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9269  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9270  }
    9271 
    9272  // End of loop.
    9273  lastOffset = size;
    9274  }
    9275  }
    9276  }
    9277 
    9278  outInfo.unusedBytes = size - outInfo.usedBytes;
    9279 }
    9280 
    9281 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
    9282 {
    9283  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9284  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9285  const VkDeviceSize size = GetSize();
    9286  const size_t suballoc1stCount = suballocations1st.size();
    9287  const size_t suballoc2ndCount = suballocations2nd.size();
    9288 
    9289  inoutStats.size += size;
    9290 
    9291  VkDeviceSize lastOffset = 0;
    9292 
    9293  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    9294  {
    9295  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    9296  size_t nextAlloc2ndIndex = m_1stNullItemsBeginCount;
    9297  while(lastOffset < freeSpace2ndTo1stEnd)
    9298  {
    9299  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9300  while(nextAlloc2ndIndex < suballoc2ndCount &&
    9301  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9302  {
    9303  ++nextAlloc2ndIndex;
    9304  }
    9305 
    9306  // Found non-null allocation.
    9307  if(nextAlloc2ndIndex < suballoc2ndCount)
    9308  {
    9309  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9310 
    9311  // 1. Process free space before this allocation.
    9312  if(lastOffset < suballoc.offset)
    9313  {
    9314  // There is free space from lastOffset to suballoc.offset.
    9315  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9316  inoutStats.unusedSize += unusedRangeSize;
    9317  ++inoutStats.unusedRangeCount;
    9318  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9319  }
    9320 
    9321  // 2. Process this allocation.
    9322  // There is allocation with suballoc.offset, suballoc.size.
    9323  ++inoutStats.allocationCount;
    9324 
    9325  // 3. Prepare for next iteration.
    9326  lastOffset = suballoc.offset + suballoc.size;
    9327  ++nextAlloc2ndIndex;
    9328  }
    9329  // We are at the end.
    9330  else
    9331  {
    9332  if(lastOffset < freeSpace2ndTo1stEnd)
    9333  {
    9334  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    9335  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    9336  inoutStats.unusedSize += unusedRangeSize;
    9337  ++inoutStats.unusedRangeCount;
    9338  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9339  }
    9340 
    9341  // End of loop.
    9342  lastOffset = freeSpace2ndTo1stEnd;
    9343  }
    9344  }
    9345  }
    9346 
    9347  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    9348  const VkDeviceSize freeSpace1stTo2ndEnd =
    9349  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    9350  while(lastOffset < freeSpace1stTo2ndEnd)
    9351  {
    9352  // Find next non-null allocation or move nextAllocIndex to the end.
    9353  while(nextAlloc1stIndex < suballoc1stCount &&
    9354  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    9355  {
    9356  ++nextAlloc1stIndex;
    9357  }
    9358 
    9359  // Found non-null allocation.
    9360  if(nextAlloc1stIndex < suballoc1stCount)
    9361  {
    9362  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    9363 
    9364  // 1. Process free space before this allocation.
    9365  if(lastOffset < suballoc.offset)
    9366  {
    9367  // There is free space from lastOffset to suballoc.offset.
    9368  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9369  inoutStats.unusedSize += unusedRangeSize;
    9370  ++inoutStats.unusedRangeCount;
    9371  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9372  }
    9373 
    9374  // 2. Process this allocation.
    9375  // There is allocation with suballoc.offset, suballoc.size.
    9376  ++inoutStats.allocationCount;
    9377 
    9378  // 3. Prepare for next iteration.
    9379  lastOffset = suballoc.offset + suballoc.size;
    9380  ++nextAlloc1stIndex;
    9381  }
    9382  // We are at the end.
    9383  else
    9384  {
    9385  if(lastOffset < freeSpace1stTo2ndEnd)
    9386  {
    9387  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    9388  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    9389  inoutStats.unusedSize += unusedRangeSize;
    9390  ++inoutStats.unusedRangeCount;
    9391  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9392  }
    9393 
    9394  // End of loop.
    9395  lastOffset = freeSpace1stTo2ndEnd;
    9396  }
    9397  }
    9398 
    9399  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    9400  {
    9401  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    9402  while(lastOffset < size)
    9403  {
    9404  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9405  while(nextAlloc2ndIndex != SIZE_MAX &&
    9406  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9407  {
    9408  --nextAlloc2ndIndex;
    9409  }
    9410 
    9411  // Found non-null allocation.
    9412  if(nextAlloc2ndIndex != SIZE_MAX)
    9413  {
    9414  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9415 
    9416  // 1. Process free space before this allocation.
    9417  if(lastOffset < suballoc.offset)
    9418  {
    9419  // There is free space from lastOffset to suballoc.offset.
    9420  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9421  inoutStats.unusedSize += unusedRangeSize;
    9422  ++inoutStats.unusedRangeCount;
    9423  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9424  }
    9425 
    9426  // 2. Process this allocation.
    9427  // There is allocation with suballoc.offset, suballoc.size.
    9428  ++inoutStats.allocationCount;
    9429 
    9430  // 3. Prepare for next iteration.
    9431  lastOffset = suballoc.offset + suballoc.size;
    9432  --nextAlloc2ndIndex;
    9433  }
    9434  // We are at the end.
    9435  else
    9436  {
    9437  if(lastOffset < size)
    9438  {
    9439  // There is free space from lastOffset to size.
    9440  const VkDeviceSize unusedRangeSize = size - lastOffset;
    9441  inoutStats.unusedSize += unusedRangeSize;
    9442  ++inoutStats.unusedRangeCount;
    9443  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9444  }
    9445 
    9446  // End of loop.
    9447  lastOffset = size;
    9448  }
    9449  }
    9450  }
    9451 }
    9452 
    9453 #if VMA_STATS_STRING_ENABLED
    9454 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
    9455 {
    9456  const VkDeviceSize size = GetSize();
    9457  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9458  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9459  const size_t suballoc1stCount = suballocations1st.size();
    9460  const size_t suballoc2ndCount = suballocations2nd.size();
    9461 
    9462  // FIRST PASS
    9463 
    9464  size_t unusedRangeCount = 0;
    9465  VkDeviceSize usedBytes = 0;
    9466 
    9467  VkDeviceSize lastOffset = 0;
    9468 
    9469  size_t alloc2ndCount = 0;
    9470  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    9471  {
    9472  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    9473  size_t nextAlloc2ndIndex = 0;
    9474  while(lastOffset < freeSpace2ndTo1stEnd)
    9475  {
    9476  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9477  while(nextAlloc2ndIndex < suballoc2ndCount &&
    9478  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9479  {
    9480  ++nextAlloc2ndIndex;
    9481  }
    9482 
    9483  // Found non-null allocation.
    9484  if(nextAlloc2ndIndex < suballoc2ndCount)
    9485  {
    9486  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9487 
    9488  // 1. Process free space before this allocation.
    9489  if(lastOffset < suballoc.offset)
    9490  {
    9491  // There is free space from lastOffset to suballoc.offset.
    9492  ++unusedRangeCount;
    9493  }
    9494 
    9495  // 2. Process this allocation.
    9496  // There is allocation with suballoc.offset, suballoc.size.
    9497  ++alloc2ndCount;
    9498  usedBytes += suballoc.size;
    9499 
    9500  // 3. Prepare for next iteration.
    9501  lastOffset = suballoc.offset + suballoc.size;
    9502  ++nextAlloc2ndIndex;
    9503  }
    9504  // We are at the end.
    9505  else
    9506  {
    9507  if(lastOffset < freeSpace2ndTo1stEnd)
    9508  {
    9509  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    9510  ++unusedRangeCount;
    9511  }
    9512 
    9513  // End of loop.
    9514  lastOffset = freeSpace2ndTo1stEnd;
    9515  }
    9516  }
    9517  }
    9518 
    9519  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    9520  size_t alloc1stCount = 0;
    9521  const VkDeviceSize freeSpace1stTo2ndEnd =
    9522  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    9523  while(lastOffset < freeSpace1stTo2ndEnd)
    9524  {
    9525  // Find next non-null allocation or move nextAllocIndex to the end.
    9526  while(nextAlloc1stIndex < suballoc1stCount &&
    9527  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    9528  {
    9529  ++nextAlloc1stIndex;
    9530  }
    9531 
    9532  // Found non-null allocation.
    9533  if(nextAlloc1stIndex < suballoc1stCount)
    9534  {
    9535  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    9536 
    9537  // 1. Process free space before this allocation.
    9538  if(lastOffset < suballoc.offset)
    9539  {
    9540  // There is free space from lastOffset to suballoc.offset.
    9541  ++unusedRangeCount;
    9542  }
    9543 
    9544  // 2. Process this allocation.
    9545  // There is allocation with suballoc.offset, suballoc.size.
    9546  ++alloc1stCount;
    9547  usedBytes += suballoc.size;
    9548 
    9549  // 3. Prepare for next iteration.
    9550  lastOffset = suballoc.offset + suballoc.size;
    9551  ++nextAlloc1stIndex;
    9552  }
    9553  // We are at the end.
    9554  else
    9555  {
    9556  if(lastOffset < size)
    9557  {
    9558  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    9559  ++unusedRangeCount;
    9560  }
    9561 
    9562  // End of loop.
    9563  lastOffset = freeSpace1stTo2ndEnd;
    9564  }
    9565  }
    9566 
    9567  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    9568  {
    9569  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    9570  while(lastOffset < size)
    9571  {
    9572  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9573  while(nextAlloc2ndIndex != SIZE_MAX &&
    9574  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9575  {
    9576  --nextAlloc2ndIndex;
    9577  }
    9578 
    9579  // Found non-null allocation.
    9580  if(nextAlloc2ndIndex != SIZE_MAX)
    9581  {
    9582  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9583 
    9584  // 1. Process free space before this allocation.
    9585  if(lastOffset < suballoc.offset)
    9586  {
    9587  // There is free space from lastOffset to suballoc.offset.
    9588  ++unusedRangeCount;
    9589  }
    9590 
    9591  // 2. Process this allocation.
    9592  // There is allocation with suballoc.offset, suballoc.size.
    9593  ++alloc2ndCount;
    9594  usedBytes += suballoc.size;
    9595 
    9596  // 3. Prepare for next iteration.
    9597  lastOffset = suballoc.offset + suballoc.size;
    9598  --nextAlloc2ndIndex;
    9599  }
    9600  // We are at the end.
    9601  else
    9602  {
    9603  if(lastOffset < size)
    9604  {
    9605  // There is free space from lastOffset to size.
    9606  ++unusedRangeCount;
    9607  }
    9608 
    9609  // End of loop.
    9610  lastOffset = size;
    9611  }
    9612  }
    9613  }
    9614 
    9615  const VkDeviceSize unusedBytes = size - usedBytes;
    9616  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
    9617 
    9618  // SECOND PASS
    9619  lastOffset = 0;
    9620 
    9621  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    9622  {
    9623  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    9624  size_t nextAlloc2ndIndex = 0;
    9625  while(lastOffset < freeSpace2ndTo1stEnd)
    9626  {
    9627  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9628  while(nextAlloc2ndIndex < suballoc2ndCount &&
    9629  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9630  {
    9631  ++nextAlloc2ndIndex;
    9632  }
    9633 
    9634  // Found non-null allocation.
    9635  if(nextAlloc2ndIndex < suballoc2ndCount)
    9636  {
    9637  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9638 
    9639  // 1. Process free space before this allocation.
    9640  if(lastOffset < suballoc.offset)
    9641  {
    9642  // There is free space from lastOffset to suballoc.offset.
    9643  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9644  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    9645  }
    9646 
    9647  // 2. Process this allocation.
    9648  // There is allocation with suballoc.offset, suballoc.size.
    9649  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    9650 
    9651  // 3. Prepare for next iteration.
    9652  lastOffset = suballoc.offset + suballoc.size;
    9653  ++nextAlloc2ndIndex;
    9654  }
    9655  // We are at the end.
    9656  else
    9657  {
    9658  if(lastOffset < freeSpace2ndTo1stEnd)
    9659  {
    9660  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    9661  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    9662  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    9663  }
    9664 
    9665  // End of loop.
    9666  lastOffset = freeSpace2ndTo1stEnd;
    9667  }
    9668  }
    9669  }
    9670 
    9671  nextAlloc1stIndex = m_1stNullItemsBeginCount;
    9672  while(lastOffset < freeSpace1stTo2ndEnd)
    9673  {
    9674  // Find next non-null allocation or move nextAllocIndex to the end.
    9675  while(nextAlloc1stIndex < suballoc1stCount &&
    9676  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    9677  {
    9678  ++nextAlloc1stIndex;
    9679  }
    9680 
    9681  // Found non-null allocation.
    9682  if(nextAlloc1stIndex < suballoc1stCount)
    9683  {
    9684  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    9685 
    9686  // 1. Process free space before this allocation.
    9687  if(lastOffset < suballoc.offset)
    9688  {
    9689  // There is free space from lastOffset to suballoc.offset.
    9690  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9691  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    9692  }
    9693 
    9694  // 2. Process this allocation.
    9695  // There is allocation with suballoc.offset, suballoc.size.
    9696  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    9697 
    9698  // 3. Prepare for next iteration.
    9699  lastOffset = suballoc.offset + suballoc.size;
    9700  ++nextAlloc1stIndex;
    9701  }
    9702  // We are at the end.
    9703  else
    9704  {
    9705  if(lastOffset < freeSpace1stTo2ndEnd)
    9706  {
    9707  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    9708  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    9709  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    9710  }
    9711 
    9712  // End of loop.
    9713  lastOffset = freeSpace1stTo2ndEnd;
    9714  }
    9715  }
    9716 
    9717  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    9718  {
    9719  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    9720  while(lastOffset < size)
    9721  {
    9722  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9723  while(nextAlloc2ndIndex != SIZE_MAX &&
    9724  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9725  {
    9726  --nextAlloc2ndIndex;
    9727  }
    9728 
    9729  // Found non-null allocation.
    9730  if(nextAlloc2ndIndex != SIZE_MAX)
    9731  {
    9732  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9733 
    9734  // 1. Process free space before this allocation.
    9735  if(lastOffset < suballoc.offset)
    9736  {
    9737  // There is free space from lastOffset to suballoc.offset.
    9738  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9739  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    9740  }
    9741 
    9742  // 2. Process this allocation.
    9743  // There is allocation with suballoc.offset, suballoc.size.
    9744  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    9745 
    9746  // 3. Prepare for next iteration.
    9747  lastOffset = suballoc.offset + suballoc.size;
    9748  --nextAlloc2ndIndex;
    9749  }
    9750  // We are at the end.
    9751  else
    9752  {
    9753  if(lastOffset < size)
    9754  {
    9755  // There is free space from lastOffset to size.
    9756  const VkDeviceSize unusedRangeSize = size - lastOffset;
    9757  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    9758  }
    9759 
    9760  // End of loop.
    9761  lastOffset = size;
    9762  }
    9763  }
    9764  }
    9765 
    9766  PrintDetailedMap_End(json);
    9767 }
    9768 #endif // #if VMA_STATS_STRING_ENABLED
    9769 
    9770 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
    9771  uint32_t currentFrameIndex,
    9772  uint32_t frameInUseCount,
    9773  VkDeviceSize bufferImageGranularity,
    9774  VkDeviceSize allocSize,
    9775  VkDeviceSize allocAlignment,
    9776  bool upperAddress,
    9777  VmaSuballocationType allocType,
    9778  bool canMakeOtherLost,
    9779  uint32_t strategy,
    9780  VmaAllocationRequest* pAllocationRequest)
    9781 {
    9782  VMA_ASSERT(allocSize > 0);
    9783  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    9784  VMA_ASSERT(pAllocationRequest != VMA_NULL);
    9785  VMA_HEAVY_ASSERT(Validate());
    9786  return upperAddress ?
    9787  CreateAllocationRequest_UpperAddress(
    9788  currentFrameIndex, frameInUseCount, bufferImageGranularity,
    9789  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest) :
    9790  CreateAllocationRequest_LowerAddress(
    9791  currentFrameIndex, frameInUseCount, bufferImageGranularity,
    9792  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest);
    9793 }
    9794 
// Tries to place an allocation at the high end of the block (double-stack mode):
// growing downward from the end of the block, or from below the last element
// of the 2nd suballocation vector. On success fills *pAllocationRequest with
// type UpperAddress and returns true; returns false if the space does not fit
// or a bufferImageGranularity conflict with the 1st vector makes it invalid.
bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    const VkDeviceSize size = GetSize();
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    // Upper-address allocation uses the 2nd vector as a stack; that is
    // incompatible with the 2nd vector already being a ring buffer.
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
        return false;
    }

    // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
    if(allocSize > size)
    {
        return false;
    }
    VkDeviceSize resultBaseOffset = size - allocSize;
    if(!suballocations2nd.empty())
    {
        const VmaSuballocation& lastSuballoc = suballocations2nd.back();
        // NOTE: resultBaseOffset may transiently wrap (unsigned) here, but the
        // check below rejects that case before the value is used.
        resultBaseOffset = lastSuballoc.offset - allocSize;
        if(allocSize > lastSuballoc.offset)
        {
            return false;
        }
    }

    // Start from offset equal to end of free space.
    VkDeviceSize resultOffset = resultBaseOffset;

    // Apply VMA_DEBUG_MARGIN at the end.
    if(VMA_DEBUG_MARGIN > 0)
    {
        if(resultOffset < VMA_DEBUG_MARGIN)
        {
            return false;
        }
        resultOffset -= VMA_DEBUG_MARGIN;
    }

    // Apply alignment. Growing downward, so align the offset down.
    resultOffset = VmaAlignDown(resultOffset, allocAlignment);

    // Check next suballocations from 2nd for BufferImageGranularity conflicts.
    // Make bigger alignment if necessary.
    if(bufferImageGranularity > 1 && !suballocations2nd.empty())
    {
        bool bufferImageGranularityConflict = false;
        // Iterate 2nd vector backward (highest index = lowest offset neighbor first).
        for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
        {
            const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
            if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
            {
                if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
                {
                    bufferImageGranularityConflict = true;
                    break;
                }
            }
            else
                // Already on previous page.
                break;
        }
        if(bufferImageGranularityConflict)
        {
            // Conflicting neighbor on the same page: push the allocation down
            // to the next granularity boundary.
            resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
        }
    }

    // There is enough free space.
    const VkDeviceSize endOf1st = !suballocations1st.empty() ?
        suballocations1st.back().offset + suballocations1st.back().size :
        0;
    if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
    {
        // Check previous suballocations for BufferImageGranularity conflicts.
        // If conflict exists, allocation cannot be made here.
        if(bufferImageGranularity > 1)
        {
            for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
            {
                const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
                    {
                        return false;
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
            }
        }

        // All tests passed: Success.
        pAllocationRequest->offset = resultOffset;
        // Free span considered is [endOf1st, resultBaseOffset + allocSize).
        pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
        pAllocationRequest->sumItemSize = 0;
        // pAllocationRequest->item unused.
        pAllocationRequest->itemsToMakeLostCount = 0;
        pAllocationRequest->type = VmaAllocationRequestType::UpperAddress;
        return true;
    }

    return false;
}
    9914 
// Tries to place an allocation at a low (default) address. Two phases:
// 1) At the end of the 1st vector (when the 2nd vector is empty or used as a
//    double stack) — free space ends at 2nd.back().offset or at block size.
// 2) Wrapping around: at the end of the 2nd vector in ring-buffer fashion,
//    where free space ends at the first live item of the 1st vector. In this
//    phase, when canMakeOtherLost is set, colliding allocations from the 1st
//    vector may be counted into itemsToMakeLostCount/sumItemSize instead of
//    failing outright.
// On success fills *pAllocationRequest (type EndOf1st or EndOf2nd) and returns
// true; otherwise returns false.
bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    const VkDeviceSize size = GetSize();
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        // Try to allocate at the end of 1st vector.

        VkDeviceSize resultBaseOffset = 0;
        if(!suballocations1st.empty())
        {
            const VmaSuballocation& lastSuballoc = suballocations1st.back();
            resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
        }

        // Start from offset equal to beginning of free space.
        VkDeviceSize resultOffset = resultBaseOffset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            resultOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        resultOffset = VmaAlignUp(resultOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1 && !suballocations1st.empty())
        {
            bool bufferImageGranularityConflict = false;
            // Walk 1st vector backward — nearest (highest-offset) neighbors first.
            for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
            {
                const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                // Push the allocation up to the next granularity boundary.
                resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
            }
        }

        // Free space ends where the 2nd (upper) stack begins, or at block end.
        const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
            suballocations2nd.back().offset : size;

        // There is enough free space at the end after alignment.
        if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
        {
            // Check next suballocations for BufferImageGranularity conflicts.
            // If conflict exists, allocation cannot be made here.
            if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
            {
                for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
                {
                    const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
                    if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                        {
                            return false;
                        }
                    }
                    else
                    {
                        // Already on previous page.
                        break;
                    }
                }
            }

            // All tests passed: Success.
            pAllocationRequest->offset = resultOffset;
            pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
            pAllocationRequest->sumItemSize = 0;
            // pAllocationRequest->item, customData unused.
            pAllocationRequest->type = VmaAllocationRequestType::EndOf1st;
            pAllocationRequest->itemsToMakeLostCount = 0;
            return true;
        }
    }

    // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
    // beginning of 1st vector as the end of free space.
    if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        VMA_ASSERT(!suballocations1st.empty());

        VkDeviceSize resultBaseOffset = 0;
        if(!suballocations2nd.empty())
        {
            const VmaSuballocation& lastSuballoc = suballocations2nd.back();
            resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
        }

        // Start from offset equal to beginning of free space.
        VkDeviceSize resultOffset = resultBaseOffset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            resultOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        resultOffset = VmaAlignUp(resultOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1 && !suballocations2nd.empty())
        {
            bool bufferImageGranularityConflict = false;
            for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
            {
                const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
            }
        }

        pAllocationRequest->itemsToMakeLostCount = 0;
        pAllocationRequest->sumItemSize = 0;
        // index1st tracks the first item of the 1st vector NOT consumed by
        // making allocations lost; it bounds the free space of this request.
        size_t index1st = m_1stNullItemsBeginCount;

        if(canMakeOtherLost)
        {
            // Consume items of the 1st vector that collide with the proposed
            // range [resultOffset, resultOffset + allocSize + margin).
            while(index1st < suballocations1st.size() &&
                resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
            {
                // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
                const VmaSuballocation& suballoc = suballocations1st[index1st];
                if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
                {
                    // No problem.
                }
                else
                {
                    VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
                    if(suballoc.hAllocation->CanBecomeLost() &&
                        suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                    {
                        ++pAllocationRequest->itemsToMakeLostCount;
                        pAllocationRequest->sumItemSize += suballoc.size;
                    }
                    else
                    {
                        // Colliding allocation is in use — cannot make it lost.
                        return false;
                    }
                }
                ++index1st;
            }

            // Check next suballocations for BufferImageGranularity conflicts.
            // If conflict exists, we must mark more allocations lost or fail.
            if(bufferImageGranularity > 1)
            {
                while(index1st < suballocations1st.size())
                {
                    const VmaSuballocation& suballoc = suballocations1st[index1st];
                    if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
                    {
                        if(suballoc.hAllocation != VK_NULL_HANDLE)
                        {
                            // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
                            // Conservatively treats any same-page neighbor as conflicting.
                            if(suballoc.hAllocation->CanBecomeLost() &&
                                suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                            {
                                ++pAllocationRequest->itemsToMakeLostCount;
                                pAllocationRequest->sumItemSize += suballoc.size;
                            }
                            else
                            {
                                return false;
                            }
                        }
                    }
                    else
                    {
                        // Already on next page.
                        break;
                    }
                    ++index1st;
                }
            }

            // Special case: There is not enough room at the end for this allocation, even after making all from the 1st lost.
            if(index1st == suballocations1st.size() &&
                resultOffset + allocSize + VMA_DEBUG_MARGIN > size)
            {
                // TODO: This is a known bug that it's not yet implemented and the allocation is failing.
                VMA_DEBUG_LOG("Unsupported special case in custom pool with linear allocation algorithm used as ring buffer with allocations that can be lost.");
            }
        }

        // There is enough free space at the end after alignment.
        if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= size) ||
            (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
        {
            // Check next suballocations for BufferImageGranularity conflicts.
            // If conflict exists, allocation cannot be made here.
            if(bufferImageGranularity > 1)
            {
                for(size_t nextSuballocIndex = index1st;
                    nextSuballocIndex < suballocations1st.size();
                    nextSuballocIndex++)
                {
                    const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
                    if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                        {
                            return false;
                        }
                    }
                    else
                    {
                        // Already on next page.
                        break;
                    }
                }
            }

            // All tests passed: Success.
            pAllocationRequest->offset = resultOffset;
            // Free size is measured up to the first surviving item of the 1st
            // vector (or block end), excluding items counted as to-be-lost.
            pAllocationRequest->sumFreeSize =
                (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
                - resultBaseOffset
                - pAllocationRequest->sumItemSize;
            pAllocationRequest->type = VmaAllocationRequestType::EndOf2nd;
            // pAllocationRequest->item, customData unused.
            return true;
        }
    }

    return false;
}
    10185 
// Makes lost exactly pAllocationRequest->itemsToMakeLostCount allocations,
// scanning the 1st vector from its first live item and — in ring-buffer mode —
// wrapping into the 2nd vector. Returns true on success; returns false if some
// allocation refuses MakeLost(), in which case earlier items may already have
// been freed (counters and m_SumFreeSize are updated as it goes).
bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    if(pAllocationRequest->itemsToMakeLostCount == 0)
    {
        return true;
    }

    // Making allocations lost is only meaningful for the ring-buffer usage
    // pattern (or before the 2nd vector is used at all).
    VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);

    // We always start from 1st.
    SuballocationVectorType* suballocations = &AccessSuballocations1st();
    size_t index = m_1stNullItemsBeginCount;
    size_t madeLostCount = 0;
    while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
    {
        if(index == suballocations->size())
        {
            index = 0;
            // Reached the end of the current vector: in ring-buffer mode
            // continue from the beginning of the 2nd vector.
            if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
            {
                suballocations = &AccessSuballocations2nd();
            }
            // else: m_2ndVectorMode == SECOND_VECTOR_EMPTY:
            // suballocations continues pointing at AccessSuballocations1st().
            VMA_ASSERT(!suballocations->empty());
        }
        VmaSuballocation& suballoc = (*suballocations)[index];
        if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
            VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
            if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
            {
                // Turn the item into a free one and account for it in the
                // appropriate null-item counter.
                suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                suballoc.hAllocation = VK_NULL_HANDLE;
                m_SumFreeSize += suballoc.size;
                if(suballocations == &AccessSuballocations1st())
                {
                    ++m_1stNullItemsMiddleCount;
                }
                else
                {
                    ++m_2ndNullItemsCount;
                }
                ++madeLostCount;
            }
            else
            {
                return false;
            }
        }
        ++index;
    }

    CleanupAfterFree();
    //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().

    return true;
}
    10249 
    10250 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    10251 {
    10252  uint32_t lostAllocationCount = 0;
    10253 
    10254  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    10255  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    10256  {
    10257  VmaSuballocation& suballoc = suballocations1st[i];
    10258  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    10259  suballoc.hAllocation->CanBecomeLost() &&
    10260  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    10261  {
    10262  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    10263  suballoc.hAllocation = VK_NULL_HANDLE;
    10264  ++m_1stNullItemsMiddleCount;
    10265  m_SumFreeSize += suballoc.size;
    10266  ++lostAllocationCount;
    10267  }
    10268  }
    10269 
    10270  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    10271  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    10272  {
    10273  VmaSuballocation& suballoc = suballocations2nd[i];
    10274  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    10275  suballoc.hAllocation->CanBecomeLost() &&
    10276  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    10277  {
    10278  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    10279  suballoc.hAllocation = VK_NULL_HANDLE;
    10280  ++m_2ndNullItemsCount;
    10281  m_SumFreeSize += suballoc.size;
    10282  ++lostAllocationCount;
    10283  }
    10284  }
    10285 
    10286  if(lostAllocationCount)
    10287  {
    10288  CleanupAfterFree();
    10289  }
    10290 
    10291  return lostAllocationCount;
    10292 }
    10293 
    10294 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
    10295 {
    10296  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    10297  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    10298  {
    10299  const VmaSuballocation& suballoc = suballocations1st[i];
    10300  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    10301  {
    10302  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    10303  {
    10304  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    10305  return VK_ERROR_VALIDATION_FAILED_EXT;
    10306  }
    10307  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    10308  {
    10309  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    10310  return VK_ERROR_VALIDATION_FAILED_EXT;
    10311  }
    10312  }
    10313  }
    10314 
    10315  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    10316  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    10317  {
    10318  const VmaSuballocation& suballoc = suballocations2nd[i];
    10319  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    10320  {
    10321  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    10322  {
    10323  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    10324  return VK_ERROR_VALIDATION_FAILED_EXT;
    10325  }
    10326  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    10327  {
    10328  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    10329  return VK_ERROR_VALIDATION_FAILED_EXT;
    10330  }
    10331  }
    10332  }
    10333 
    10334  return VK_SUCCESS;
    10335 }
    10336 
// Commits a previously created allocation request: appends the new
// suballocation to the appropriate vector, updates the 2nd-vector mode
// (EMPTY -> RING_BUFFER / DOUBLE_STACK on first use) and m_SumFreeSize.
void VmaBlockMetadata_Linear::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    VmaAllocation hAllocation)
{
    const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };

    switch(request.type)
    {
    case VmaAllocationRequestType::UpperAddress:
        {
            // Upper-address allocations live in the 2nd vector used as a stack.
            VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
                "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
            SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
            suballocations2nd.push_back(newSuballoc);
            m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
        }
        break;
    case VmaAllocationRequestType::EndOf1st:
        {
            SuballocationVectorType& suballocations1st = AccessSuballocations1st();

            // Must come after the last existing item of 1st.
            VMA_ASSERT(suballocations1st.empty() ||
                request.offset >= suballocations1st.back().offset + suballocations1st.back().size);
            // Check if it fits before the end of the block.
            VMA_ASSERT(request.offset + allocSize <= GetSize());

            suballocations1st.push_back(newSuballoc);
        }
        break;
    case VmaAllocationRequestType::EndOf2nd:
        {
            SuballocationVectorType& suballocations1st = AccessSuballocations1st();
            // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
            VMA_ASSERT(!suballocations1st.empty() &&
                request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset);
            SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

            switch(m_2ndVectorMode)
            {
            case SECOND_VECTOR_EMPTY:
                // First allocation from second part ring buffer.
                VMA_ASSERT(suballocations2nd.empty());
                m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
                break;
            case SECOND_VECTOR_RING_BUFFER:
                // 2-part ring buffer is already started.
                VMA_ASSERT(!suballocations2nd.empty());
                break;
            case SECOND_VECTOR_DOUBLE_STACK:
                VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
                break;
            default:
                VMA_ASSERT(0);
            }

            suballocations2nd.push_back(newSuballoc);
        }
        break;
    default:
        VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
    }

    // The new suballocation consumes part of the block's free space.
    m_SumFreeSize -= newSuballoc.size;
}
    10403 
    10404 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
    10405 {
    10406  FreeAtOffset(allocation->GetOffset());
    10407 }
    10408 
// Frees the suballocation that starts at `offset`. Fast paths are tried first
// (the oldest item in the 1st vector, then the most recently added item of the
// active vector); otherwise a binary search locates the item in the middle of
// the 1st or 2nd vector and marks it as a null (free) item. Ends with
// CleanupAfterFree() to restore the allocator's invariants. Asserts if the
// offset does not belong to any live suballocation.
void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(!suballocations1st.empty())
    {
        // First allocation: Mark it as next empty at the beginning.
        VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
        if(firstSuballoc.offset == offset)
        {
            firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
            firstSuballoc.hAllocation = VK_NULL_HANDLE;
            m_SumFreeSize += firstSuballoc.size;
            ++m_1stNullItemsBeginCount;
            CleanupAfterFree();
            return;
        }
    }

    // Last allocation in 2-part ring buffer or top of upper stack (same logic).
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        VmaSuballocation& lastSuballoc = suballocations2nd.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations2nd.pop_back();
            CleanupAfterFree();
            return;
        }
    }
    // Last allocation in 1st vector.
    else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
    {
        VmaSuballocation& lastSuballoc = suballocations1st.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations1st.pop_back();
            CleanupAfterFree();
            return;
        }
    }

    // Item from the middle of 1st vector.
    {
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        SuballocationVectorType::iterator it = VmaBinaryFindSorted(
            suballocations1st.begin() + m_1stNullItemsBeginCount,
            suballocations1st.end(),
            refSuballoc,
            VmaSuballocationOffsetLess());
        if(it != suballocations1st.end())
        {
            // Mark as free in place; physical removal is deferred to CleanupAfterFree().
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_1stNullItemsMiddleCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
    {
        // Item from the middle of 2nd vector.
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        // Ring buffer keeps the 2nd vector sorted ascending by offset; double
        // stack keeps it sorted descending, hence the two comparators.
        SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
            VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) :
            VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater());
        if(it != suballocations2nd.end())
        {
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_2ndNullItemsCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
}
    10498 
    10499 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
    10500 {
    10501  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
    10502  const size_t suballocCount = AccessSuballocations1st().size();
    10503  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
    10504 }
    10505 
// Restores the linear allocator's invariants after a free: trims null items
// from the ends of both vectors, optionally compacts the 1st vector, resets
// the 2nd-vector mode when it empties, and promotes the 2nd vector to become
// the new 1st vector when the 1st one has fully drained in ring-buffer mode.
// The exact order of these steps matters; do not reorder.
void VmaBlockMetadata_Linear::CleanupAfterFree()
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(IsEmpty())
    {
        // Nothing allocated anymore - reset everything to the pristine state.
        suballocations1st.clear();
        suballocations2nd.clear();
        m_1stNullItemsBeginCount = 0;
        m_1stNullItemsMiddleCount = 0;
        m_2ndNullItemsCount = 0;
        m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    }
    else
    {
        const size_t suballoc1stCount = suballocations1st.size();
        const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
        VMA_ASSERT(nullItem1stCount <= suballoc1stCount);

        // Find more null items at the beginning of 1st vector.
        while(m_1stNullItemsBeginCount < suballoc1stCount &&
            suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
        {
            ++m_1stNullItemsBeginCount;
            --m_1stNullItemsMiddleCount;
        }

        // Find more null items at the end of 1st vector.
        while(m_1stNullItemsMiddleCount > 0 &&
            suballocations1st.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_1stNullItemsMiddleCount;
            suballocations1st.pop_back();
        }

        // Find more null items at the end of 2nd vector.
        while(m_2ndNullItemsCount > 0 &&
            suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_2ndNullItemsCount;
            suballocations2nd.pop_back();
        }

        // Find more null items at the beginning of 2nd vector.
        while(m_2ndNullItemsCount > 0 &&
            suballocations2nd[0].hAllocation == VK_NULL_HANDLE)
        {
            --m_2ndNullItemsCount;
            VmaVectorRemove(suballocations2nd, 0);
        }

        if(ShouldCompact1st())
        {
            // Compact: move every live item to the front, dropping null items.
            const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
            size_t srcIndex = m_1stNullItemsBeginCount;
            for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
            {
                while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
                {
                    ++srcIndex;
                }
                if(dstIndex != srcIndex)
                {
                    suballocations1st[dstIndex] = suballocations1st[srcIndex];
                }
                ++srcIndex;
            }
            suballocations1st.resize(nonNullItemCount);
            m_1stNullItemsBeginCount = 0;
            m_1stNullItemsMiddleCount = 0;
        }

        // 2nd vector became empty.
        if(suballocations2nd.empty())
        {
            m_2ndVectorMode = SECOND_VECTOR_EMPTY;
        }

        // 1st vector became empty.
        if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
        {
            suballocations1st.clear();
            m_1stNullItemsBeginCount = 0;

            if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
            {
                // Swap 1st with 2nd. Now 2nd is empty.
                m_2ndVectorMode = SECOND_VECTOR_EMPTY;
                m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
                // Leading null items of the promoted vector become "begin" nulls.
                while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
                    suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
                {
                    ++m_1stNullItemsBeginCount;
                    --m_1stNullItemsMiddleCount;
                }
                m_2ndNullItemsCount = 0;
                // Flip which underlying vector is considered "1st".
                m_1stVectorIndex ^= 1;
            }
        }
    }

    VMA_HEAVY_ASSERT(Validate());
}
    10610 
    10611 
    10613 // class VmaBlockMetadata_Buddy
    10614 
// Constructs empty buddy-allocator metadata. The node tree is built later in
// Init(); m_FreeCount starts at 1 because the whole future root node counts as
// a single free range.
VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_Root(VMA_NULL),
    m_AllocationCount(0),
    m_FreeCount(1),
    m_SumFreeSize(0)
{
    // Zero all per-level free-list heads/tails (front/back become null).
    memset(m_FreeList, 0, sizeof(m_FreeList));
}
    10624 
// Recursively frees the whole node tree built by Init().
// NOTE(review): DeleteNode dereferences its argument, so this assumes Init()
// was called (m_Root != VMA_NULL) - confirm all construction paths do so.
VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
{
    DeleteNode(m_Root);
}
    10629 
// Initializes the buddy metadata for a block of the given size. The buddy
// algorithm operates on power-of-2 sizes only, so any tail of the block beyond
// the largest power of 2 <= size is unusable.
void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);

    m_UsableSize = VmaPrevPow2(size);
    m_SumFreeSize = m_UsableSize;

    // Calculate m_LevelCount: deepen until nodes would fall below MIN_NODE_SIZE,
    // capped at MAX_LEVELS.
    m_LevelCount = 1;
    while(m_LevelCount < MAX_LEVELS &&
        LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
    {
        ++m_LevelCount;
    }

    // The whole usable area starts as one free root node at level 0.
    Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
    rootNode->offset = 0;
    rootNode->type = Node::TYPE_FREE;
    rootNode->parent = VMA_NULL;
    rootNode->buddy = VMA_NULL;

    m_Root = rootNode;
    AddToFreeListFront(0, rootNode);
}
    10654 
// Debug validation of the whole buddy structure: checks the node tree against
// accumulated counters, then checks the doubly-linked free list of every level.
// Returns true when everything is consistent (VMA_VALIDATE returns false on
// the first violation).
bool VmaBlockMetadata_Buddy::Validate() const
{
    // Validate tree.
    ValidationContext ctx;
    if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
    {
        VMA_VALIDATE(false && "ValidateNode failed.");
    }
    VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
    VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);

    // Validate free node lists.
    for(uint32_t level = 0; level < m_LevelCount; ++level)
    {
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
            m_FreeList[level].front->free.prev == VMA_NULL);

        for(Node* node = m_FreeList[level].front;
            node != VMA_NULL;
            node = node->free.next)
        {
            VMA_VALIDATE(node->type == Node::TYPE_FREE);

            if(node->free.next == VMA_NULL)
            {
                VMA_VALIDATE(m_FreeList[level].back == node);
            }
            else
            {
                VMA_VALIDATE(node->free.next->free.prev == node);
            }
        }
    }

    // Validate that free lists at higher levels are empty.
    for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
    {
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
    }

    return true;
}
    10697 
    10698 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
    10699 {
    10700  for(uint32_t level = 0; level < m_LevelCount; ++level)
    10701  {
    10702  if(m_FreeList[level].front != VMA_NULL)
    10703  {
    10704  return LevelToNodeSize(level);
    10705  }
    10706  }
    10707  return 0;
    10708 }
    10709 
// Fills outInfo with detailed statistics for this single block: resets all
// fields, accumulates them by walking the node tree, then accounts for the
// unusable tail (block size minus the power-of-2 usable size) as one extra
// unused range.
void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
{
    const VkDeviceSize unusableSize = GetUnusableSize();

    outInfo.blockCount = 1;

    outInfo.allocationCount = outInfo.unusedRangeCount = 0;
    outInfo.usedBytes = outInfo.unusedBytes = 0;

    // Min fields start at UINT64_MAX so the first real value replaces them.
    outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
    outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
    outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.

    CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));

    if(unusableSize > 0)
    {
        ++outInfo.unusedRangeCount;
        outInfo.unusedBytes += unusableSize;
        outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
        outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
    }
}
    10733 
    10734 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
    10735 {
    10736  const VkDeviceSize unusableSize = GetUnusableSize();
    10737 
    10738  inoutStats.size += GetSize();
    10739  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
    10740  inoutStats.allocationCount += m_AllocationCount;
    10741  inoutStats.unusedRangeCount += m_FreeCount;
    10742  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    10743 
    10744  if(unusableSize > 0)
    10745  {
    10746  ++inoutStats.unusedRangeCount;
    10747  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
    10748  }
    10749 }
    10750 
    10751 #if VMA_STATS_STRING_ENABLED
    10752 
// Writes a JSON description of this block: summary header, every node of the
// tree, and the unusable tail as a final unused range.
void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
{
    // TODO optimize - computes full statistics just for the header numbers.
    VmaStatInfo stat;
    CalcAllocationStatInfo(stat);

    PrintDetailedMap_Begin(
        json,
        stat.unusedBytes,
        stat.allocationCount,
        stat.unusedRangeCount);

    PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));

    const VkDeviceSize unusableSize = GetUnusableSize();
    if(unusableSize > 0)
    {
        PrintDetailedMap_UnusedRange(json,
            m_UsableSize, // offset
            unusableSize); // size
    }

    PrintDetailedMap_End(json);
}
    10777 
    10778 #endif // #if VMA_STATS_STRING_ENABLED
    10779 
// Tries to find a free node that can hold allocSize with allocAlignment and,
// on success, fills *pAllocationRequest; the chosen level is passed to Alloc()
// through customData. Returns false when no suitable node exists.
// upperAddress and lost allocations are not supported by the buddy algorithm.
bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    bool upperAddress,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");

    // Simple way to respect bufferImageGranularity. May be optimized some day.
    // Whenever it might be an OPTIMAL image...
    if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
        allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
        allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
    {
        allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
        allocSize = VMA_MAX(allocSize, bufferImageGranularity);
    }

    if(allocSize > m_UsableSize)
    {
        return false;
    }

    // Scan from the deepest fitting level (smallest sufficient node) up to the
    // root, taking the first free node whose offset satisfies the alignment.
    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    for(uint32_t level = targetLevel + 1; level--; )
    {
        for(Node* freeNode = m_FreeList[level].front;
            freeNode != VMA_NULL;
            freeNode = freeNode->free.next)
        {
            if(freeNode->offset % allocAlignment == 0)
            {
                pAllocationRequest->type = VmaAllocationRequestType::Normal;
                pAllocationRequest->offset = freeNode->offset;
                pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
                pAllocationRequest->sumItemSize = 0;
                pAllocationRequest->itemsToMakeLostCount = 0;
                // Remember the level the node was found at for Alloc().
                pAllocationRequest->customData = (void*)(uintptr_t)level;
                return true;
            }
        }
    }

    return false;
}
    10831 
    10832 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
    10833  uint32_t currentFrameIndex,
    10834  uint32_t frameInUseCount,
    10835  VmaAllocationRequest* pAllocationRequest)
    10836 {
    10837  /*
    10838  Lost allocations are not supported in buddy allocator at the moment.
    10839  Support might be added in the future.
    10840  */
    10841  return pAllocationRequest->itemsToMakeLostCount == 0;
    10842 }
    10843 
    10844 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    10845 {
    10846  /*
    10847  Lost allocations are not supported in buddy allocator at the moment.
    10848  Support might be added in the future.
    10849  */
    10850  return 0;
    10851 }
    10852 
// Commits an allocation previously prepared by CreateAllocationRequest().
// Starting from the free node found at the level stored in request.customData,
// it repeatedly splits nodes in half until reaching the target level, then
// converts the final node into an allocation node and updates counters.
void VmaBlockMetadata_Buddy::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    VmaAllocation hAllocation)
{
    VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);

    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;

    // Re-locate the free node chosen by CreateAllocationRequest() by offset.
    Node* currNode = m_FreeList[currLevel].front;
    VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    while(currNode->offset != request.offset)
    {
        currNode = currNode->free.next;
        VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    }

    // Go down, splitting free nodes.
    while(currLevel < targetLevel)
    {
        // currNode is already first free node at currLevel.
        // Remove it from list of free nodes at this currLevel.
        RemoveFromFreeList(currLevel, currNode);

        const uint32_t childrenLevel = currLevel + 1;

        // Create two free sub-nodes.
        Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
        Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();

        leftChild->offset = currNode->offset;
        leftChild->type = Node::TYPE_FREE;
        leftChild->parent = currNode;
        leftChild->buddy = rightChild;

        rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
        rightChild->type = Node::TYPE_FREE;
        rightChild->parent = currNode;
        rightChild->buddy = leftChild;

        // Convert current currNode to split type.
        currNode->type = Node::TYPE_SPLIT;
        currNode->split.leftChild = leftChild;

        // Add child nodes to free list. Order is important!
        // Left is pushed last so it ends up at the front of the list.
        AddToFreeListFront(childrenLevel, rightChild);
        AddToFreeListFront(childrenLevel, leftChild);

        ++m_FreeCount;
        //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
        ++currLevel;
        currNode = m_FreeList[currLevel].front;

        /*
        We can be sure that currNode, as left child of node previously split,
        also fullfills the alignment requirement.
        */
    }

    // Remove from free list.
    VMA_ASSERT(currLevel == targetLevel &&
        currNode != VMA_NULL &&
        currNode->type == Node::TYPE_FREE);
    RemoveFromFreeList(currLevel, currNode);

    // Convert to allocation node.
    currNode->type = Node::TYPE_ALLOCATION;
    currNode->allocation.alloc = hAllocation;

    ++m_AllocationCount;
    --m_FreeCount;
    m_SumFreeSize -= allocSize;
}
    10928 
    10929 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
    10930 {
    10931  if(node->type == Node::TYPE_SPLIT)
    10932  {
    10933  DeleteNode(node->split.leftChild->buddy);
    10934  DeleteNode(node->split.leftChild);
    10935  }
    10936 
    10937  vma_delete(GetAllocationCallbacks(), node);
    10938 }
    10939 
// Recursively validates one tree node: parent/buddy back-links, the invariants
// of its type, and (for split nodes) both children. Also accumulates free-size
// and allocation counters into ctx so the caller can cross-check the cached
// totals. Returns false on the first violation (via VMA_VALIDATE).
bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
{
    VMA_VALIDATE(level < m_LevelCount);
    VMA_VALIDATE(curr->parent == parent);
    // Only the root has no buddy, and only the root has no parent.
    VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
    VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
    switch(curr->type)
    {
    case Node::TYPE_FREE:
        // curr->free.prev, next are validated separately.
        ctx.calculatedSumFreeSize += levelNodeSize;
        ++ctx.calculatedFreeCount;
        break;
    case Node::TYPE_ALLOCATION:
        ++ctx.calculatedAllocationCount;
        // A node may be larger than the allocation it holds; the remainder is free.
        ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
        VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
        break;
    case Node::TYPE_SPLIT:
        {
            const uint32_t childrenLevel = level + 1;
            const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
            const Node* const leftChild = curr->split.leftChild;
            VMA_VALIDATE(leftChild != VMA_NULL);
            VMA_VALIDATE(leftChild->offset == curr->offset);
            if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
            {
                VMA_VALIDATE(false && "ValidateNode for left child failed.");
            }
            const Node* const rightChild = leftChild->buddy;
            VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
            if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
            {
                VMA_VALIDATE(false && "ValidateNode for right child failed.");
            }
        }
        break;
    default:
        return false;
    }

    return true;
}
    10983 
    10984 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
    10985 {
    10986  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
    10987  uint32_t level = 0;
    10988  VkDeviceSize currLevelNodeSize = m_UsableSize;
    10989  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
    10990  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
    10991  {
    10992  ++level;
    10993  currLevelNodeSize = nextLevelNodeSize;
    10994  nextLevelNodeSize = currLevelNodeSize >> 1;
    10995  }
    10996  return level;
    10997 }
    10998 
// Frees the allocation node at `offset`: walks the tree from the root down
// through split nodes to locate it, converts it back to a free node, then
// merges it with its buddy (and their parents recursively) whenever both
// halves are free, finally returning the resulting node to its free list.
void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
{
    // Find node and level.
    Node* node = m_Root;
    VkDeviceSize nodeOffset = 0;
    uint32_t level = 0;
    VkDeviceSize levelNodeSize = LevelToNodeSize(0);
    while(node->type == Node::TYPE_SPLIT)
    {
        const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
        if(offset < nodeOffset + nextLevelSize)
        {
            // Target lies in the left half.
            node = node->split.leftChild;
        }
        else
        {
            // Target lies in the right half.
            node = node->split.leftChild->buddy;
            nodeOffset += nextLevelSize;
        }
        ++level;
        levelNodeSize = nextLevelSize;
    }

    VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
    VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);

    ++m_FreeCount;
    --m_AllocationCount;
    // NOTE(review): the assert above permits alloc == VK_NULL_HANDLE, yet the
    // next line dereferences it - confirm callers always pass a valid handle.
    m_SumFreeSize += alloc->GetSize();

    node->type = Node::TYPE_FREE;

    // Join free nodes if possible.
    while(level > 0 && node->buddy->type == Node::TYPE_FREE)
    {
        RemoveFromFreeList(level, node->buddy);
        Node* const parent = node->parent;

        vma_delete(GetAllocationCallbacks(), node->buddy);
        vma_delete(GetAllocationCallbacks(), node);
        parent->type = Node::TYPE_FREE;

        node = parent;
        --level;
        //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
        --m_FreeCount;
    }

    AddToFreeListFront(level, node);
}
    11049 
    11050 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
    11051 {
    11052  switch(node->type)
    11053  {
    11054  case Node::TYPE_FREE:
    11055  ++outInfo.unusedRangeCount;
    11056  outInfo.unusedBytes += levelNodeSize;
    11057  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
    11058  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, levelNodeSize);
    11059  break;
    11060  case Node::TYPE_ALLOCATION:
    11061  {
    11062  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
    11063  ++outInfo.allocationCount;
    11064  outInfo.usedBytes += allocSize;
    11065  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
    11066  outInfo.allocationSizeMin = VMA_MAX(outInfo.allocationSizeMin, allocSize);
    11067 
    11068  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
    11069  if(unusedRangeSize > 0)
    11070  {
    11071  ++outInfo.unusedRangeCount;
    11072  outInfo.unusedBytes += unusedRangeSize;
    11073  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    11074  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, unusedRangeSize);
    11075  }
    11076  }
    11077  break;
    11078  case Node::TYPE_SPLIT:
    11079  {
    11080  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
    11081  const Node* const leftChild = node->split.leftChild;
    11082  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
    11083  const Node* const rightChild = leftChild->buddy;
    11084  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
    11085  }
    11086  break;
    11087  default:
    11088  VMA_ASSERT(0);
    11089  }
    11090 }
    11091 
    11092 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
    11093 {
    11094  VMA_ASSERT(node->type == Node::TYPE_FREE);
    11095 
    11096  // List is empty.
    11097  Node* const frontNode = m_FreeList[level].front;
    11098  if(frontNode == VMA_NULL)
    11099  {
    11100  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
    11101  node->free.prev = node->free.next = VMA_NULL;
    11102  m_FreeList[level].front = m_FreeList[level].back = node;
    11103  }
    11104  else
    11105  {
    11106  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
    11107  node->free.prev = VMA_NULL;
    11108  node->free.next = frontNode;
    11109  frontNode->free.prev = node;
    11110  m_FreeList[level].front = node;
    11111  }
    11112 }
    11113 
// Unlinks a node from the doubly-linked free list of the given level, fixing
// up the list's front/back pointers when the node sits at either end.
void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
{
    VMA_ASSERT(m_FreeList[level].front != VMA_NULL);

    // It is at the front.
    if(node->free.prev == VMA_NULL)
    {
        VMA_ASSERT(m_FreeList[level].front == node);
        m_FreeList[level].front = node->free.next;
    }
    else
    {
        Node* const prevFreeNode = node->free.prev;
        VMA_ASSERT(prevFreeNode->free.next == node);
        prevFreeNode->free.next = node->free.next;
    }

    // It is at the back.
    if(node->free.next == VMA_NULL)
    {
        VMA_ASSERT(m_FreeList[level].back == node);
        m_FreeList[level].back = node->free.prev;
    }
    else
    {
        Node* const nextFreeNode = node->free.next;
        VMA_ASSERT(nextFreeNode->free.prev == node);
        nextFreeNode->free.prev = node->free.prev;
    }
}
    11144 
    11145 #if VMA_STATS_STRING_ENABLED
// Recursively writes one tree node to the JSON map: a free node as an unused
// range, an allocation node as an allocation (plus its unused remainder, if
// the node is larger than the allocation), and a split node via its children.
void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
{
    switch(node->type)
    {
    case Node::TYPE_FREE:
        PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
        break;
    case Node::TYPE_ALLOCATION:
        {
            PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
            const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
            if(allocSize < levelNodeSize)
            {
                // Remainder of the node past the allocation is unused.
                PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
            }
        }
        break;
    case Node::TYPE_SPLIT:
        {
            const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
            const Node* const leftChild = node->split.leftChild;
            PrintDetailedMapNode(json, leftChild, childrenNodeSize);
            const Node* const rightChild = leftChild->buddy;
            PrintDetailedMapNode(json, rightChild, childrenNodeSize);
        }
        break;
    default:
        VMA_ASSERT(0);
    }
}
    11176 #endif // #if VMA_STATS_STRING_ENABLED
    11177 
    11178 
    11180 // class VmaDeviceMemoryBlock
    11181 
// Constructs an empty, uninitialized block; the real setup (memory handle,
// metadata object) happens in Init().
// NOTE(review): m_hParentPool is not in the initializer list here - it is
// assigned in Init(); confirm no code path reads it before Init() is called.
VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    m_pMetadata(VMA_NULL),
    m_MemoryTypeIndex(UINT32_MAX),
    m_Id(0),
    m_hMemory(VK_NULL_HANDLE),
    m_MapCount(0),
    m_pMappedData(VMA_NULL)
{
}
    11191 
    11192 void VmaDeviceMemoryBlock::Init(
    11193  VmaAllocator hAllocator,
    11194  VmaPool hParentPool,
    11195  uint32_t newMemoryTypeIndex,
    11196  VkDeviceMemory newMemory,
    11197  VkDeviceSize newSize,
    11198  uint32_t id,
    11199  uint32_t algorithm)
    11200 {
    11201  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    11202 
    11203  m_hParentPool = hParentPool;
    11204  m_MemoryTypeIndex = newMemoryTypeIndex;
    11205  m_Id = id;
    11206  m_hMemory = newMemory;
    11207 
    11208  switch(algorithm)
    11209  {
    11211  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
    11212  break;
    11214  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
    11215  break;
    11216  default:
    11217  VMA_ASSERT(0);
    11218  // Fall-through.
    11219  case 0:
    11220  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
    11221  }
    11222  m_pMetadata->Init(newSize);
    11223 }
    11224 
// Releases the block's Vulkan device memory and its metadata object. All
// suballocations must already be freed.
void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
{
    // This is the most important assert in the entire library.
    // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
    VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");

    VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
    m_hMemory = VK_NULL_HANDLE;

    vma_delete(allocator, m_pMetadata);
    m_pMetadata = VMA_NULL;
}
    11238 
// Consistency check used by VMA_HEAVY_ASSERT: the block must have memory
// attached and a nonzero size, then the metadata validates itself.
// VMA_VALIDATE (macro defined elsewhere) presumably returns false from this
// function when its condition fails, rather than aborting.
bool VmaDeviceMemoryBlock::Validate() const
{
    VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
        (m_pMetadata->GetSize() != 0));

    return m_pMetadata->Validate();
}
    11246 
    11247 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
    11248 {
    11249  void* pData = nullptr;
    11250  VkResult res = Map(hAllocator, 1, &pData);
    11251  if(res != VK_SUCCESS)
    11252  {
    11253  return res;
    11254  }
    11255 
    11256  res = m_pMetadata->CheckCorruption(pData);
    11257 
    11258  Unmap(hAllocator, 1);
    11259 
    11260  return res;
    11261 }
    11262 
/*
Maps the block's entire VkDeviceMemory into CPU address space and returns the
base pointer through ppData (ppData may be VMA_NULL if the caller only needs
the mapping to exist).

The mapping is reference-counted: each call adds `count` references. The real
vkMapMemory is issued only on the transition from unmapped to mapped; later
calls reuse the cached m_pMappedData. Must be balanced by Unmap() with the
same total count. count == 0 is a no-op returning VK_SUCCESS.
Thread-safe: guarded by m_Mutex when the allocator was created with mutexes.
*/
VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
{
    if(count == 0)
    {
        return VK_SUCCESS;
    }

    VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    if(m_MapCount != 0)
    {
        // Already mapped - just bump the reference count and hand out the cached pointer.
        m_MapCount += count;
        VMA_ASSERT(m_pMappedData != VMA_NULL);
        if(ppData != VMA_NULL)
        {
            *ppData = m_pMappedData;
        }
        return VK_SUCCESS;
    }
    else
    {
        // First mapping: map the whole memory object once.
        VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
            hAllocator->m_hDevice,
            m_hMemory,
            0, // offset
            VK_WHOLE_SIZE,
            0, // flags
            &m_pMappedData);
        if(result == VK_SUCCESS)
        {
            if(ppData != VMA_NULL)
            {
                *ppData = m_pMappedData;
            }
            // Only take the references on success; on failure the block stays unmapped.
            m_MapCount = count;
        }
        return result;
    }
}
    11301 
/*
Releases `count` mapping references previously acquired via Map(). The real
vkUnmapMemory is issued only when the reference count drops to zero.
count == 0 is a no-op. Unbalanced calls (more unmaps than maps) assert in
debug builds and are otherwise ignored.
Thread-safe: guarded by m_Mutex when the allocator was created with mutexes.
*/
void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
{
    if(count == 0)
    {
        return;
    }

    VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    if(m_MapCount >= count)
    {
        m_MapCount -= count;
        if(m_MapCount == 0)
        {
            // Last reference gone - clear the cached pointer and truly unmap.
            m_pMappedData = VMA_NULL;
            (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
        }
    }
    else
    {
        VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
    }
}
    11324 
    11325 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
    11326 {
    11327  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    11328  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
    11329 
    11330  void* pData;
    11331  VkResult res = Map(hAllocator, 1, &pData);
    11332  if(res != VK_SUCCESS)
    11333  {
    11334  return res;
    11335  }
    11336 
    11337  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
    11338  VmaWriteMagicValue(pData, allocOffset + allocSize);
    11339 
    11340  Unmap(hAllocator, 1);
    11341 
    11342  return VK_SUCCESS;
    11343 }
    11344 
/*
Debug-only corruption detection: verifies the magic values written by
WriteMagicValueAroundAllocation() in the margins directly before and after a
freed allocation. A damaged margin indicates the user wrote outside the
allocation; it is reported via VMA_ASSERT, not the return value. The return
value is VK_SUCCESS unless mapping the block itself failed.
*/
VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
{
    VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);

    void* pData;
    VkResult res = Map(hAllocator, 1, &pData);
    if(res != VK_SUCCESS)
    {
        return res;
    }

    // Check leading margin first; only check the trailing one if it was intact.
    if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
    {
        VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
    }
    else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
    {
        VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
    }

    Unmap(hAllocator, 1);

    return VK_SUCCESS;
}
    11370 
    11371 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
    11372  const VmaAllocator hAllocator,
    11373  const VmaAllocation hAllocation,
    11374  VkBuffer hBuffer)
    11375 {
    11376  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
    11377  hAllocation->GetBlock() == this);
    11378  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    11379  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    11380  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
    11381  hAllocator->m_hDevice,
    11382  hBuffer,
    11383  m_hMemory,
    11384  hAllocation->GetOffset());
    11385 }
    11386 
    11387 VkResult VmaDeviceMemoryBlock::BindImageMemory(
    11388  const VmaAllocator hAllocator,
    11389  const VmaAllocation hAllocation,
    11390  VkImage hImage)
    11391 {
    11392  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
    11393  hAllocation->GetBlock() == this);
    11394  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    11395  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    11396  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
    11397  hAllocator->m_hDevice,
    11398  hImage,
    11399  m_hMemory,
    11400  hAllocation->GetOffset());
    11401 }
    11402 
    11403 static void InitStatInfo(VmaStatInfo& outInfo)
    11404 {
    11405  memset(&outInfo, 0, sizeof(outInfo));
    11406  outInfo.allocationSizeMin = UINT64_MAX;
    11407  outInfo.unusedRangeSizeMin = UINT64_MAX;
    11408 }
    11409 
    11410 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
    11411 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
    11412 {
    11413  inoutInfo.blockCount += srcInfo.blockCount;
    11414  inoutInfo.allocationCount += srcInfo.allocationCount;
    11415  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
    11416  inoutInfo.usedBytes += srcInfo.usedBytes;
    11417  inoutInfo.unusedBytes += srcInfo.unusedBytes;
    11418  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
    11419  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
    11420  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
    11421  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
    11422 }
    11423 
    11424 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
    11425 {
    11426  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
    11427  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
    11428  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
    11429  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
    11430 }
    11431 
/*
Creates a custom pool by forwarding its parameters to the embedded block
vector. createInfo.blockSize == 0 means "use the allocator's preferred block
size" and marks the size as non-explicit, which enables the heuristic
1/8-1/4-1/2 first-block sizing in VmaBlockVector::AllocatePage.
*/
VmaPool_T::VmaPool_T(
    VmaAllocator hAllocator,
    const VmaPoolCreateInfo& createInfo,
    VkDeviceSize preferredBlockSize) :
    m_BlockVector(
        hAllocator,
        this, // hParentPool
        createInfo.memoryTypeIndex,
        createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
        createInfo.minBlockCount,
        createInfo.maxBlockCount,
        (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
        createInfo.frameInUseCount,
        true, // isCustomPool
        createInfo.blockSize != 0, // explicitBlockSize
        createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
    m_Id(0)
{
}
    11451 
// Intentionally empty: members clean up through their own destructors
// (notably m_BlockVector, whose destructor destroys all remaining blocks).
VmaPool_T::~VmaPool_T()
{
}
    11455 
    11456 #if VMA_STATS_STRING_ENABLED
    11457 
    11458 #endif // #if VMA_STATS_STRING_ENABLED
    11459 
/*
Stores the configuration of a vector of VkDeviceMemory blocks for one memory
type. No blocks are created here - see CreateMinBlocks() and CreateBlock().
isCustomPool distinguishes user-created pools from the allocator's default
per-memory-type vectors; explicitBlockSize disables the heuristic block
sizing used in AllocatePage; algorithm is 0 or one of the
VMA_POOL_CREATE_*_ALGORITHM_BIT values.
*/
VmaBlockVector::VmaBlockVector(
    VmaAllocator hAllocator,
    VmaPool hParentPool,
    uint32_t memoryTypeIndex,
    VkDeviceSize preferredBlockSize,
    size_t minBlockCount,
    size_t maxBlockCount,
    VkDeviceSize bufferImageGranularity,
    uint32_t frameInUseCount,
    bool isCustomPool,
    bool explicitBlockSize,
    uint32_t algorithm) :
    m_hAllocator(hAllocator),
    m_hParentPool(hParentPool),
    m_MemoryTypeIndex(memoryTypeIndex),
    m_PreferredBlockSize(preferredBlockSize),
    m_MinBlockCount(minBlockCount),
    m_MaxBlockCount(maxBlockCount),
    m_BufferImageGranularity(bufferImageGranularity),
    m_FrameInUseCount(frameInUseCount),
    m_IsCustomPool(isCustomPool),
    m_ExplicitBlockSize(explicitBlockSize),
    m_Algorithm(algorithm),
    m_HasEmptyBlock(false),
    m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    m_NextBlockId(0)
{
}
    11488 
    11489 VmaBlockVector::~VmaBlockVector()
    11490 {
    11491  for(size_t i = m_Blocks.size(); i--; )
    11492  {
    11493  m_Blocks[i]->Destroy(m_hAllocator);
    11494  vma_delete(m_hAllocator, m_Blocks[i]);
    11495  }
    11496 }
    11497 
    11498 VkResult VmaBlockVector::CreateMinBlocks()
    11499 {
    11500  for(size_t i = 0; i < m_MinBlockCount; ++i)
    11501  {
    11502  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
    11503  if(res != VK_SUCCESS)
    11504  {
    11505  return res;
    11506  }
    11507  }
    11508  return VK_SUCCESS;
    11509 }
    11510 
    11511 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
    11512 {
    11513  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
    11514 
    11515  const size_t blockCount = m_Blocks.size();
    11516 
    11517  pStats->size = 0;
    11518  pStats->unusedSize = 0;
    11519  pStats->allocationCount = 0;
    11520  pStats->unusedRangeCount = 0;
    11521  pStats->unusedRangeSizeMax = 0;
    11522  pStats->blockCount = blockCount;
    11523 
    11524  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    11525  {
    11526  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    11527  VMA_ASSERT(pBlock);
    11528  VMA_HEAVY_ASSERT(pBlock->Validate());
    11529  pBlock->m_pMetadata->AddPoolStats(*pStats);
    11530  }
    11531 }
    11532 
    11533 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
    11534 {
    11535  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    11536  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
    11537  (VMA_DEBUG_MARGIN > 0) &&
    11538  (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) &&
    11539  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
    11540 }
    11541 
// Retry budget for AllocatePage when allocations may be made lost: other
// threads can keep touching allocations between our attempts, so the search
// is repeated up to this many times before giving up with VK_ERROR_TOO_MANY_OBJECTS.
static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
    11543 
/*
Allocates `allocationCount` allocations of identical size/alignment/flags
from this block vector, writing the handles to pAllocations[0..count-1].

All-or-nothing: on the first AllocatePage failure, every allocation created
so far is freed again and the output array is zeroed, then the error code is
returned. The page allocations happen under a single write lock; the rollback
Free() calls run after the lock scope ends (Free takes the lock itself).
When corruption detection is active, size and alignment are rounded up so the
magic-value margins stay aligned to the magic value's size.
*/
VkResult VmaBlockVector::Allocate(
    uint32_t currentFrameIndex,
    VkDeviceSize size,
    VkDeviceSize alignment,
    const VmaAllocationCreateInfo& createInfo,
    VmaSuballocationType suballocType,
    size_t allocationCount,
    VmaAllocation* pAllocations)
{
    size_t allocIndex;
    VkResult res = VK_SUCCESS;

    if(IsCorruptionDetectionEnabled())
    {
        size = VmaAlignUp<VkDeviceSize>(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
        alignment = VmaAlignUp<VkDeviceSize>(alignment, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
    }

    // Scope for the write lock.
    {
        VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
        for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
        {
            res = AllocatePage(
                currentFrameIndex,
                size,
                alignment,
                createInfo,
                suballocType,
                pAllocations + allocIndex);
            if(res != VK_SUCCESS)
            {
                break;
            }
        }
    }

    if(res != VK_SUCCESS)
    {
        // Free all already created allocations.
        while(allocIndex--)
        {
            Free(pAllocations[allocIndex]);
        }
        memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
    }

    return res;
}
    11592 
    11593 VkResult VmaBlockVector::AllocatePage(
    11594  uint32_t currentFrameIndex,
    11595  VkDeviceSize size,
    11596  VkDeviceSize alignment,
    11597  const VmaAllocationCreateInfo& createInfo,
    11598  VmaSuballocationType suballocType,
    11599  VmaAllocation* pAllocation)
    11600 {
    11601  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    11602  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
    11603  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    11604  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
    11605  const bool canCreateNewBlock =
    11606  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
    11607  (m_Blocks.size() < m_MaxBlockCount);
    11608  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
    11609 
    11610  // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer.
    11611  // Which in turn is available only when maxBlockCount = 1.
    11612  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
    11613  {
    11614  canMakeOtherLost = false;
    11615  }
    11616 
    11617  // Upper address can only be used with linear allocator and within single memory block.
    11618  if(isUpperAddress &&
    11619  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
    11620  {
    11621  return VK_ERROR_FEATURE_NOT_PRESENT;
    11622  }
    11623 
    11624  // Validate strategy.
    11625  switch(strategy)
    11626  {
    11627  case 0:
    11629  break;
    11633  break;
    11634  default:
    11635  return VK_ERROR_FEATURE_NOT_PRESENT;
    11636  }
    11637 
    11638  // Early reject: requested allocation size is larger that maximum block size for this block vector.
    11639  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
    11640  {
    11641  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    11642  }
    11643 
    11644  /*
    11645  Under certain condition, this whole section can be skipped for optimization, so
    11646  we move on directly to trying to allocate with canMakeOtherLost. That's the case
    11647  e.g. for custom pools with linear algorithm.
    11648  */
    11649  if(!canMakeOtherLost || canCreateNewBlock)
    11650  {
    11651  // 1. Search existing allocations. Try to allocate without making other allocations lost.
    11652  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
    11654 
    11655  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    11656  {
    11657  // Use only last block.
    11658  if(!m_Blocks.empty())
    11659  {
    11660  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
    11661  VMA_ASSERT(pCurrBlock);
    11662  VkResult res = AllocateFromBlock(
    11663  pCurrBlock,
    11664  currentFrameIndex,
    11665  size,
    11666  alignment,
    11667  allocFlagsCopy,
    11668  createInfo.pUserData,
    11669  suballocType,
    11670  strategy,
    11671  pAllocation);
    11672  if(res == VK_SUCCESS)
    11673  {
    11674  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
    11675  return VK_SUCCESS;
    11676  }
    11677  }
    11678  }
    11679  else
    11680  {
    11682  {
    11683  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    11684  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    11685  {
    11686  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    11687  VMA_ASSERT(pCurrBlock);
    11688  VkResult res = AllocateFromBlock(
    11689  pCurrBlock,
    11690  currentFrameIndex,
    11691  size,
    11692  alignment,
    11693  allocFlagsCopy,
    11694  createInfo.pUserData,
    11695  suballocType,
    11696  strategy,
    11697  pAllocation);
    11698  if(res == VK_SUCCESS)
    11699  {
    11700  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
    11701  return VK_SUCCESS;
    11702  }
    11703  }
    11704  }
    11705  else // WORST_FIT, FIRST_FIT
    11706  {
    11707  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
    11708  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    11709  {
    11710  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    11711  VMA_ASSERT(pCurrBlock);
    11712  VkResult res = AllocateFromBlock(
    11713  pCurrBlock,
    11714  currentFrameIndex,
    11715  size,
    11716  alignment,
    11717  allocFlagsCopy,
    11718  createInfo.pUserData,
    11719  suballocType,
    11720  strategy,
    11721  pAllocation);
    11722  if(res == VK_SUCCESS)
    11723  {
    11724  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
    11725  return VK_SUCCESS;
    11726  }
    11727  }
    11728  }
    11729  }
    11730 
    11731  // 2. Try to create new block.
    11732  if(canCreateNewBlock)
    11733  {
    11734  // Calculate optimal size for new block.
    11735  VkDeviceSize newBlockSize = m_PreferredBlockSize;
    11736  uint32_t newBlockSizeShift = 0;
    11737  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
    11738 
    11739  if(!m_ExplicitBlockSize)
    11740  {
    11741  // Allocate 1/8, 1/4, 1/2 as first blocks.
    11742  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
    11743  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
    11744  {
    11745  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    11746  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
    11747  {
    11748  newBlockSize = smallerNewBlockSize;
    11749  ++newBlockSizeShift;
    11750  }
    11751  else
    11752  {
    11753  break;
    11754  }
    11755  }
    11756  }
    11757 
    11758  size_t newBlockIndex = 0;
    11759  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
    11760  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
    11761  if(!m_ExplicitBlockSize)
    11762  {
    11763  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
    11764  {
    11765  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    11766  if(smallerNewBlockSize >= size)
    11767  {
    11768  newBlockSize = smallerNewBlockSize;
    11769  ++newBlockSizeShift;
    11770  res = CreateBlock(newBlockSize, &newBlockIndex);
    11771  }
    11772  else
    11773  {
    11774  break;
    11775  }
    11776  }
    11777  }
    11778 
    11779  if(res == VK_SUCCESS)
    11780  {
    11781  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
    11782  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
    11783 
    11784  res = AllocateFromBlock(
    11785  pBlock,
    11786  currentFrameIndex,
    11787  size,
    11788  alignment,
    11789  allocFlagsCopy,
    11790  createInfo.pUserData,
    11791  suballocType,
    11792  strategy,
    11793  pAllocation);
    11794  if(res == VK_SUCCESS)
    11795  {
    11796  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
    11797  return VK_SUCCESS;
    11798  }
    11799  else
    11800  {
    11801  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
    11802  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    11803  }
    11804  }
    11805  }
    11806  }
    11807 
    11808  // 3. Try to allocate from existing blocks with making other allocations lost.
    11809  if(canMakeOtherLost)
    11810  {
    11811  uint32_t tryIndex = 0;
    11812  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
    11813  {
    11814  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
    11815  VmaAllocationRequest bestRequest = {};
    11816  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
    11817 
    11818  // 1. Search existing allocations.
    11820  {
    11821  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    11822  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    11823  {
    11824  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    11825  VMA_ASSERT(pCurrBlock);
    11826  VmaAllocationRequest currRequest = {};
    11827  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
    11828  currentFrameIndex,
    11829  m_FrameInUseCount,
    11830  m_BufferImageGranularity,
    11831  size,
    11832  alignment,
    11833  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
    11834  suballocType,
    11835  canMakeOtherLost,
    11836  strategy,
    11837  &currRequest))
    11838  {
    11839  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    11840  if(pBestRequestBlock == VMA_NULL ||
    11841  currRequestCost < bestRequestCost)
    11842  {
    11843  pBestRequestBlock = pCurrBlock;
    11844  bestRequest = currRequest;
    11845  bestRequestCost = currRequestCost;
    11846 
    11847  if(bestRequestCost == 0)
    11848  {
    11849  break;
    11850  }
    11851  }
    11852  }
    11853  }
    11854  }
    11855  else // WORST_FIT, FIRST_FIT
    11856  {
    11857  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
    11858  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    11859  {
    11860  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    11861  VMA_ASSERT(pCurrBlock);
    11862  VmaAllocationRequest currRequest = {};
    11863  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
    11864  currentFrameIndex,
    11865  m_FrameInUseCount,
    11866  m_BufferImageGranularity,
    11867  size,
    11868  alignment,
    11869  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
    11870  suballocType,
    11871  canMakeOtherLost,
    11872  strategy,
    11873  &currRequest))
    11874  {
    11875  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    11876  if(pBestRequestBlock == VMA_NULL ||
    11877  currRequestCost < bestRequestCost ||
    11879  {
    11880  pBestRequestBlock = pCurrBlock;
    11881  bestRequest = currRequest;
    11882  bestRequestCost = currRequestCost;
    11883 
    11884  if(bestRequestCost == 0 ||
    11886  {
    11887  break;
    11888  }
    11889  }
    11890  }
    11891  }
    11892  }
    11893 
    11894  if(pBestRequestBlock != VMA_NULL)
    11895  {
    11896  if(mapped)
    11897  {
    11898  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
    11899  if(res != VK_SUCCESS)
    11900  {
    11901  return res;
    11902  }
    11903  }
    11904 
    11905  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
    11906  currentFrameIndex,
    11907  m_FrameInUseCount,
    11908  &bestRequest))
    11909  {
    11910  // We no longer have an empty Allocation.
    11911  if(pBestRequestBlock->m_pMetadata->IsEmpty())
    11912  {
    11913  m_HasEmptyBlock = false;
    11914  }
    11915  // Allocate from this pBlock.
    11916  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate();
    11917  (*pAllocation)->Ctor(currentFrameIndex, isUserDataString);
    11918  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, *pAllocation);
    11919  (*pAllocation)->InitBlockAllocation(
    11920  pBestRequestBlock,
    11921  bestRequest.offset,
    11922  alignment,
    11923  size,
    11924  suballocType,
    11925  mapped,
    11926  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    11927  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
    11928  VMA_DEBUG_LOG(" Returned from existing block");
    11929  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
    11930  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    11931  {
    11932  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    11933  }
    11934  if(IsCorruptionDetectionEnabled())
    11935  {
    11936  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
    11937  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
    11938  }
    11939  return VK_SUCCESS;
    11940  }
    11941  // else: Some allocations must have been touched while we are here. Next try.
    11942  }
    11943  else
    11944  {
    11945  // Could not find place in any of the blocks - break outer loop.
    11946  break;
    11947  }
    11948  }
    11949  /* Maximum number of tries exceeded - a very unlike event when many other
    11950  threads are simultaneously touching allocations making it impossible to make
    11951  lost at the same time as we try to allocate. */
    11952  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
    11953  {
    11954  return VK_ERROR_TOO_MANY_OBJECTS;
    11955  }
    11956  }
    11957 
    11958  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    11959 }
    11960 
/*
Frees one allocation back to its block. Under the write lock: validates debug
margins and drops the persistent mapping if any, frees the suballocation in
the block's metadata, then applies the "at most one empty block" heuristic -
a newly emptied block is destroyed if another empty block already exists and
the minimum block count allows it; otherwise it is kept as the single cached
empty block. The actual VkDeviceMemory release happens after the lock is
dropped, to keep the critical section short.
*/
void VmaBlockVector::Free(
    VmaAllocation hAllocation)
{
    VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;

    // Scope for lock.
    {
        VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);

        VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();

        if(IsCorruptionDetectionEnabled())
        {
            // Check the magic margins before the memory is recycled.
            VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
            VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
        }

        if(hAllocation->IsPersistentMap())
        {
            // Release the map reference taken when the allocation was created mapped.
            pBlock->Unmap(m_hAllocator, 1);
        }

        pBlock->m_pMetadata->Free(hAllocation);
        VMA_HEAVY_ASSERT(pBlock->Validate());

        VMA_DEBUG_LOG("  Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);

        // pBlock became empty after this deallocation.
        if(pBlock->m_pMetadata->IsEmpty())
        {
            // Already has empty Allocation. We don't want to have two, so delete this one.
            if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
            {
                pBlockToDelete = pBlock;
                Remove(pBlock);
            }
            // We now have first empty block.
            else
            {
                m_HasEmptyBlock = true;
            }
        }
        // pBlock didn't become empty, but we have another empty block - find and free that one.
        // (This is optional, heuristics.)
        else if(m_HasEmptyBlock)
        {
            VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
            if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
            {
                pBlockToDelete = pLastBlock;
                m_Blocks.pop_back();
                m_HasEmptyBlock = false;
            }
        }

        IncrementallySortBlocks();
    }

    // Destruction of a free Allocation. Deferred until this point, outside of mutex
    // lock, for performance reason.
    if(pBlockToDelete != VMA_NULL)
    {
        VMA_DEBUG_LOG("    Deleted empty allocation");
        pBlockToDelete->Destroy(m_hAllocator);
        vma_delete(m_hAllocator, pBlockToDelete);
    }
}
    12028 
    12029 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
    12030 {
    12031  VkDeviceSize result = 0;
    12032  for(size_t i = m_Blocks.size(); i--; )
    12033  {
    12034  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
    12035  if(result >= m_PreferredBlockSize)
    12036  {
    12037  break;
    12038  }
    12039  }
    12040  return result;
    12041 }
    12042 
    12043 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
    12044 {
    12045  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    12046  {
    12047  if(m_Blocks[blockIndex] == pBlock)
    12048  {
    12049  VmaVectorRemove(m_Blocks, blockIndex);
    12050  return;
    12051  }
    12052  }
    12053  VMA_ASSERT(0);
    12054 }
    12055 
    12056 void VmaBlockVector::IncrementallySortBlocks()
    12057 {
    12058  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    12059  {
    12060  // Bubble sort only until first swap.
    12061  for(size_t i = 1; i < m_Blocks.size(); ++i)
    12062  {
    12063  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
    12064  {
    12065  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
    12066  return;
    12067  }
    12068  }
    12069  }
    12070 }
    12071 
/*
Attempts to suballocate from one specific block, without making any other
allocation lost (asserted). On success creates and initializes the
VmaAllocation object, takes a map reference if the allocation must be
persistently mapped, optionally fills the memory with the debug pattern and
writes corruption-detection margins. Returns VK_ERROR_OUT_OF_DEVICE_MEMORY
when the block has no suitable free range, or a mapping error.
*/
VkResult VmaBlockVector::AllocateFromBlock(
    VmaDeviceMemoryBlock* pBlock,
    uint32_t currentFrameIndex,
    VkDeviceSize size,
    VkDeviceSize alignment,
    VmaAllocationCreateFlags allocFlags,
    void* pUserData,
    VmaSuballocationType suballocType,
    uint32_t strategy,
    VmaAllocation* pAllocation)
{
    VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
    const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;

    VmaAllocationRequest currRequest = {};
    if(pBlock->m_pMetadata->CreateAllocationRequest(
        currentFrameIndex,
        m_FrameInUseCount,
        m_BufferImageGranularity,
        size,
        alignment,
        isUpperAddress,
        suballocType,
        false, // canMakeOtherLost
        strategy,
        &currRequest))
    {
        // Allocate from pCurrBlock.
        VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);

        if(mapped)
        {
            // Persistent mapping: hold one map reference for the allocation's lifetime.
            VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
            if(res != VK_SUCCESS)
            {
                return res;
            }
        }

        // We no longer have an empty Allocation.
        if(pBlock->m_pMetadata->IsEmpty())
        {
            m_HasEmptyBlock = false;
        }

        // Create the allocation object and commit the request to the metadata.
        *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate();
        (*pAllocation)->Ctor(currentFrameIndex, isUserDataString);
        pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, *pAllocation);
        (*pAllocation)->InitBlockAllocation(
            pBlock,
            currRequest.offset,
            alignment,
            size,
            suballocType,
            mapped,
            (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
        VMA_HEAVY_ASSERT(pBlock->Validate());
        (*pAllocation)->SetUserData(m_hAllocator, pUserData);
        if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
        {
            m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
        }
        if(IsCorruptionDetectionEnabled())
        {
            VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
            VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
        }
        return VK_SUCCESS;
    }
    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}
    12145 
    12146 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
    12147 {
    12148  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    12149  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
    12150  allocInfo.allocationSize = blockSize;
    12151  VkDeviceMemory mem = VK_NULL_HANDLE;
    12152  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
    12153  if(res < 0)
    12154  {
    12155  return res;
    12156  }
    12157 
    12158  // New VkDeviceMemory successfully created.
    12159 
    12160  // Create new Allocation for it.
    12161  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
    12162  pBlock->Init(
    12163  m_hAllocator,
    12164  m_hParentPool,
    12165  m_MemoryTypeIndex,
    12166  mem,
    12167  allocInfo.allocationSize,
    12168  m_NextBlockId++,
    12169  m_Algorithm);
    12170 
    12171  m_Blocks.push_back(pBlock);
    12172  if(pNewBlockIndex != VMA_NULL)
    12173  {
    12174  *pNewBlockIndex = m_Blocks.size() - 1;
    12175  }
    12176 
    12177  return VK_SUCCESS;
    12178 }
    12179 
// Performs defragmentation moves on the CPU: maps all involved blocks,
// copies data with memmove(), then unmaps the blocks that were mapped only
// for this operation. For non-coherent memory it invalidates source ranges
// before reading and flushes destination ranges after writing.
// On a mapping failure pDefragCtx->res receives the error and remaining
// copies are skipped.
void VmaBlockVector::ApplyDefragmentationMovesCpu(
    class VmaBlockVectorDefragmentationContext* pDefragCtx,
    const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves)
{
    const size_t blockCount = m_Blocks.size();
    const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex);

    enum BLOCK_FLAG
    {
        BLOCK_FLAG_USED = 0x00000001,
        // Set when the block was not already mapped and we mapped it here,
        // so we know to unmap it at the end.
        BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002,
    };

    struct BlockInfo
    {
        uint32_t flags;
        void* pMappedData;
    };
    VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> >
        blockInfo(blockCount, VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks()));
    memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo));

    // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
    const size_t moveCount = moves.size();
    for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
    {
        const VmaDefragmentationMove& move = moves[moveIndex];
        blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED;
        blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED;
    }

    VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);

    // Go over all blocks. Get mapped pointer or map if necessary.
    for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo& currBlockInfo = blockInfo[blockIndex];
        VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
        if((currBlockInfo.flags & BLOCK_FLAG_USED) != 0)
        {
            currBlockInfo.pMappedData = pBlock->GetMappedData();
            // It is not originally mapped - map it.
            if(currBlockInfo.pMappedData == VMA_NULL)
            {
                pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData);
                if(pDefragCtx->res == VK_SUCCESS)
                {
                    currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION;
                }
            }
        }
    }

    // Go over all moves. Do actual data transfer.
    if(pDefragCtx->res == VK_SUCCESS)
    {
        const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
        VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };

        for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
        {
            const VmaDefragmentationMove& move = moves[moveIndex];

            const BlockInfo& srcBlockInfo = blockInfo[move.srcBlockIndex];
            const BlockInfo& dstBlockInfo = blockInfo[move.dstBlockIndex];

            VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData);

            // Invalidate source.
            if(isNonCoherent)
            {
                // Range must be aligned to nonCoherentAtomSize and clamped to
                // the block size, per VkMappedMemoryRange requirements.
                VmaDeviceMemoryBlock* const pSrcBlock = m_Blocks[move.srcBlockIndex];
                memRange.memory = pSrcBlock->GetDeviceMemory();
                memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize);
                memRange.size = VMA_MIN(
                    VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize),
                    pSrcBlock->m_pMetadata->GetSize() - memRange.offset);
                (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
            }

            // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
            // memmove (not memcpy) because source and destination may overlap.
            memmove(
                reinterpret_cast<char*>(dstBlockInfo.pMappedData) + move.dstOffset,
                reinterpret_cast<char*>(srcBlockInfo.pMappedData) + move.srcOffset,
                static_cast<size_t>(move.size));

            if(IsCorruptionDetectionEnabled())
            {
                // Re-create the magic markers around the moved allocation.
                VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset - VMA_DEBUG_MARGIN);
                VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size);
            }

            // Flush destination.
            if(isNonCoherent)
            {
                VmaDeviceMemoryBlock* const pDstBlock = m_Blocks[move.dstBlockIndex];
                memRange.memory = pDstBlock->GetDeviceMemory();
                memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize);
                memRange.size = VMA_MIN(
                    VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize),
                    pDstBlock->m_pMetadata->GetSize() - memRange.offset);
                (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
            }
        }
    }

    // Go over all blocks in reverse order. Unmap those that were mapped just for defragmentation.
    // Regardless of pCtx->res == VK_SUCCESS.
    for(size_t blockIndex = blockCount; blockIndex--; )
    {
        const BlockInfo& currBlockInfo = blockInfo[blockIndex];
        if((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0)
        {
            VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
            pBlock->Unmap(m_hAllocator, 1);
        }
    }
}
    12298 
// Records defragmentation moves into `commandBuffer` as vkCmdCopyBuffer
// commands. A temporary buffer spanning the whole block is created and bound
// for every block that participates in a move; those buffers are stored in
// pDefragCtx->blockContexts and destroyed later (see DefragmentationEnd).
// When copy commands were recorded, pDefragCtx->res is set to VK_NOT_READY:
// the caller must submit and complete the command buffer before the
// defragmentation can be finalized.
void VmaBlockVector::ApplyDefragmentationMovesGpu(
    class VmaBlockVectorDefragmentationContext* pDefragCtx,
    const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    VkCommandBuffer commandBuffer)
{
    const size_t blockCount = m_Blocks.size();

    pDefragCtx->blockContexts.resize(blockCount);
    memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext));

    // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
    const size_t moveCount = moves.size();
    for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
    {
        const VmaDefragmentationMove& move = moves[moveIndex];
        pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
        pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
    }

    VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);

    // Go over all blocks. Create and bind buffer for whole block if necessary.
    {
        VkBufferCreateInfo bufCreateInfo;
        VmaFillGpuDefragmentationBufferCreateInfo(bufCreateInfo);

        for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
        {
            VmaBlockDefragmentationContext& currBlockCtx = pDefragCtx->blockContexts[blockIndex];
            VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
            if((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0)
            {
                // Buffer covers the entire block so copy offsets equal
                // suballocation offsets.
                bufCreateInfo.size = pBlock->m_pMetadata->GetSize();
                pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)(
                    m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer);
                if(pDefragCtx->res == VK_SUCCESS)
                {
                    pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)(
                        m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0);
                }
            }
        }
    }

    // Go over all moves. Post data transfer commands to command buffer.
    if(pDefragCtx->res == VK_SUCCESS)
    {
        for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
        {
            const VmaDefragmentationMove& move = moves[moveIndex];

            const VmaBlockDefragmentationContext& srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex];
            const VmaBlockDefragmentationContext& dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex];

            VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer);

            // VkBufferCopy: { srcOffset, dstOffset, size }.
            VkBufferCopy region = {
                move.srcOffset,
                move.dstOffset,
                move.size };
            (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)(
                commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, &region);
        }
    }

    // Save buffers to defrag context for later destruction.
    if(pDefragCtx->res == VK_SUCCESS && moveCount > 0)
    {
        // VK_NOT_READY signals that GPU work must complete before finishing.
        pDefragCtx->res = VK_NOT_READY;
    }
}
    12370 
    12371 void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats)
    12372 {
    12373  m_HasEmptyBlock = false;
    12374  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    12375  {
    12376  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
    12377  if(pBlock->m_pMetadata->IsEmpty())
    12378  {
    12379  if(m_Blocks.size() > m_MinBlockCount)
    12380  {
    12381  if(pDefragmentationStats != VMA_NULL)
    12382  {
    12383  ++pDefragmentationStats->deviceMemoryBlocksFreed;
    12384  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
    12385  }
    12386 
    12387  VmaVectorRemove(m_Blocks, blockIndex);
    12388  pBlock->Destroy(m_hAllocator);
    12389  vma_delete(m_hAllocator, pBlock);
    12390  }
    12391  else
    12392  {
    12393  m_HasEmptyBlock = true;
    12394  }
    12395  }
    12396  }
    12397 }
    12398 
    12399 #if VMA_STATS_STRING_ENABLED
    12400 
// Writes a JSON description of this block vector: pool parameters for custom
// pools (memory type, block size and count limits, frame-in-use count,
// algorithm) or just the preferred block size for default pools, followed by
// a detailed map of every block keyed by block id.
void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
{
    VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);

    json.BeginObject();

    if(m_IsCustomPool)
    {
        json.WriteString("MemoryTypeIndex");
        json.WriteNumber(m_MemoryTypeIndex);

        json.WriteString("BlockSize");
        json.WriteNumber(m_PreferredBlockSize);

        json.WriteString("BlockCount");
        json.BeginObject(true);
        // Min/Max are emitted only when they constrain the pool.
        if(m_MinBlockCount > 0)
        {
            json.WriteString("Min");
            json.WriteNumber((uint64_t)m_MinBlockCount);
        }
        if(m_MaxBlockCount < SIZE_MAX)
        {
            json.WriteString("Max");
            json.WriteNumber((uint64_t)m_MaxBlockCount);
        }
        json.WriteString("Cur");
        json.WriteNumber((uint64_t)m_Blocks.size());
        json.EndObject();

        if(m_FrameInUseCount > 0)
        {
            json.WriteString("FrameInUseCount");
            json.WriteNumber(m_FrameInUseCount);
        }

        if(m_Algorithm != 0)
        {
            json.WriteString("Algorithm");
            json.WriteString(VmaAlgorithmToStr(m_Algorithm));
        }
    }
    else
    {
        json.WriteString("PreferredBlockSize");
        json.WriteNumber(m_PreferredBlockSize);
    }

    // One entry per block, keyed by the block's numeric id.
    json.WriteString("Blocks");
    json.BeginObject();
    for(size_t i = 0; i < m_Blocks.size(); ++i)
    {
        json.BeginString();
        json.ContinueString(m_Blocks[i]->GetId());
        json.EndString();

        m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
    }
    json.EndObject();

    json.EndObject();
}
    12463 
    12464 #endif // #if VMA_STATS_STRING_ENABLED
    12465 
    12466 void VmaBlockVector::Defragment(
    12467  class VmaBlockVectorDefragmentationContext* pCtx,
    12468  VmaDefragmentationStats* pStats,
    12469  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
    12470  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
    12471  VkCommandBuffer commandBuffer)
    12472 {
    12473  pCtx->res = VK_SUCCESS;
    12474 
    12475  const VkMemoryPropertyFlags memPropFlags =
    12476  m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags;
    12477  const bool isHostVisible = (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
    12478 
    12479  const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 &&
    12480  isHostVisible;
    12481  const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0 &&
    12482  !IsCorruptionDetectionEnabled() &&
    12483  ((1u << m_MemoryTypeIndex) & m_hAllocator->GetGpuDefragmentationMemoryTypeBits()) != 0;
    12484 
    12485  // There are options to defragment this memory type.
    12486  if(canDefragmentOnCpu || canDefragmentOnGpu)
    12487  {
    12488  bool defragmentOnGpu;
    12489  // There is only one option to defragment this memory type.
    12490  if(canDefragmentOnGpu != canDefragmentOnCpu)
    12491  {
    12492  defragmentOnGpu = canDefragmentOnGpu;
    12493  }
    12494  // Both options are available: Heuristics to choose the best one.
    12495  else
    12496  {
    12497  defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 ||
    12498  m_hAllocator->IsIntegratedGpu();
    12499  }
    12500 
    12501  bool overlappingMoveSupported = !defragmentOnGpu;
    12502 
    12503  if(m_hAllocator->m_UseMutex)
    12504  {
    12505  m_Mutex.LockWrite();
    12506  pCtx->mutexLocked = true;
    12507  }
    12508 
    12509  pCtx->Begin(overlappingMoveSupported);
    12510 
    12511  // Defragment.
    12512 
    12513  const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove;
    12514  const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove;
    12515  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > moves =
    12516  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >(VmaStlAllocator<VmaDefragmentationMove>(m_hAllocator->GetAllocationCallbacks()));
    12517  pCtx->res = pCtx->GetAlgorithm()->Defragment(moves, maxBytesToMove, maxAllocationsToMove);
    12518 
    12519  // Accumulate statistics.
    12520  if(pStats != VMA_NULL)
    12521  {
    12522  const VkDeviceSize bytesMoved = pCtx->GetAlgorithm()->GetBytesMoved();
    12523  const uint32_t allocationsMoved = pCtx->GetAlgorithm()->GetAllocationsMoved();
    12524  pStats->bytesMoved += bytesMoved;
    12525  pStats->allocationsMoved += allocationsMoved;
    12526  VMA_ASSERT(bytesMoved <= maxBytesToMove);
    12527  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
    12528  if(defragmentOnGpu)
    12529  {
    12530  maxGpuBytesToMove -= bytesMoved;
    12531  maxGpuAllocationsToMove -= allocationsMoved;
    12532  }
    12533  else
    12534  {
    12535  maxCpuBytesToMove -= bytesMoved;
    12536  maxCpuAllocationsToMove -= allocationsMoved;
    12537  }
    12538  }
    12539 
    12540  if(pCtx->res >= VK_SUCCESS)
    12541  {
    12542  if(defragmentOnGpu)
    12543  {
    12544  ApplyDefragmentationMovesGpu(pCtx, moves, commandBuffer);
    12545  }
    12546  else
    12547  {
    12548  ApplyDefragmentationMovesCpu(pCtx, moves);
    12549  }
    12550  }
    12551  }
    12552 }
    12553 
    12554 void VmaBlockVector::DefragmentationEnd(
    12555  class VmaBlockVectorDefragmentationContext* pCtx,
    12556  VmaDefragmentationStats* pStats)
    12557 {
    12558  // Destroy buffers.
    12559  for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--; )
    12560  {
    12561  VmaBlockDefragmentationContext& blockCtx = pCtx->blockContexts[blockIndex];
    12562  if(blockCtx.hBuffer)
    12563  {
    12564  (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(
    12565  m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
    12566  }
    12567  }
    12568 
    12569  if(pCtx->res >= VK_SUCCESS)
    12570  {
    12571  FreeEmptyBlocks(pStats);
    12572  }
    12573 
    12574  if(pCtx->mutexLocked)
    12575  {
    12576  VMA_ASSERT(m_hAllocator->m_UseMutex);
    12577  m_Mutex.UnlockWrite();
    12578  }
    12579 }
    12580 
    12581 size_t VmaBlockVector::CalcAllocationCount() const
    12582 {
    12583  size_t result = 0;
    12584  for(size_t i = 0; i < m_Blocks.size(); ++i)
    12585  {
    12586  result += m_Blocks[i]->m_pMetadata->GetAllocationCount();
    12587  }
    12588  return result;
    12589 }
    12590 
    12591 bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const
    12592 {
    12593  if(m_BufferImageGranularity == 1)
    12594  {
    12595  return false;
    12596  }
    12597  VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE;
    12598  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
    12599  {
    12600  VmaDeviceMemoryBlock* const pBlock = m_Blocks[i];
    12601  VMA_ASSERT(m_Algorithm == 0);
    12602  VmaBlockMetadata_Generic* const pMetadata = (VmaBlockMetadata_Generic*)pBlock->m_pMetadata;
    12603  if(pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType))
    12604  {
    12605  return true;
    12606  }
    12607  }
    12608  return false;
    12609 }
    12610 
    12611 void VmaBlockVector::MakePoolAllocationsLost(
    12612  uint32_t currentFrameIndex,
    12613  size_t* pLostAllocationCount)
    12614 {
    12615  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
    12616  size_t lostAllocationCount = 0;
    12617  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    12618  {
    12619  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    12620  VMA_ASSERT(pBlock);
    12621  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
    12622  }
    12623  if(pLostAllocationCount != VMA_NULL)
    12624  {
    12625  *pLostAllocationCount = lostAllocationCount;
    12626  }
    12627 }
    12628 
    12629 VkResult VmaBlockVector::CheckCorruption()
    12630 {
    12631  if(!IsCorruptionDetectionEnabled())
    12632  {
    12633  return VK_ERROR_FEATURE_NOT_PRESENT;
    12634  }
    12635 
    12636  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
    12637  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    12638  {
    12639  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    12640  VMA_ASSERT(pBlock);
    12641  VkResult res = pBlock->CheckCorruption(m_hAllocator);
    12642  if(res != VK_SUCCESS)
    12643  {
    12644  return res;
    12645  }
    12646  }
    12647  return VK_SUCCESS;
    12648 }
    12649 
    12650 void VmaBlockVector::AddStats(VmaStats* pStats)
    12651 {
    12652  const uint32_t memTypeIndex = m_MemoryTypeIndex;
    12653  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
    12654 
    12655  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
    12656 
    12657  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    12658  {
    12659  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    12660  VMA_ASSERT(pBlock);
    12661  VMA_HEAVY_ASSERT(pBlock->Validate());
    12662  VmaStatInfo allocationStatInfo;
    12663  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
    12664  VmaAddStatInfo(pStats->total, allocationStatInfo);
    12665  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    12666  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    12667  }
    12668 }
    12669 
    12671 // VmaDefragmentationAlgorithm_Generic members definition
    12672 
// Builds a snapshot of all blocks of pBlockVector: one BlockInfo per block,
// remembering each block's original index, then sorts the infos by block
// pointer so that AddAllocation() can locate a block via binary search.
VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic(
    VmaAllocator hAllocator,
    VmaBlockVector* pBlockVector,
    uint32_t currentFrameIndex,
    bool overlappingMoveSupported) :
    VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
    m_AllocationCount(0),
    m_AllAllocations(false),
    m_BytesMoved(0),
    m_AllocationsMoved(0),
    m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
{
    // Create block info for each block.
    const size_t blockCount = m_pBlockVector->m_Blocks.size();
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
        pBlockInfo->m_OriginalBlockIndex = blockIndex;
        pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
        m_Blocks.push_back(pBlockInfo);
    }

    // Sort them by m_pBlock pointer value.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
}
    12698 
    12699 VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic()
    12700 {
    12701  for(size_t i = m_Blocks.size(); i--; )
    12702  {
    12703  vma_delete(m_hAllocator, m_Blocks[i]);
    12704  }
    12705 }
    12706 
// Registers hAlloc as a candidate for defragmentation, attaching it to the
// BlockInfo of its owning block. pChanged, if not null, will later be set to
// VK_TRUE when the allocation actually gets moved.
void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
{
    // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
    if(hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
    {
        VmaDeviceMemoryBlock* pBlock = hAlloc->GetBlock();
        // m_Blocks is sorted by block pointer - find the owning BlockInfo by binary search.
        BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
        if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
        {
            AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged);
            (*it)->m_Allocations.push_back(allocInfo);
        }
        else
        {
            // The allocation's block must be one of those snapshotted in the constructor.
            VMA_ASSERT(0);
        }

        ++m_AllocationCount;
    }
}
    12727 
// One round of defragmentation: walks allocations from the most "source"
// blocks (end of m_Blocks after sorting) backwards and tries to relocate
// each into the earliest block/offset where it fits. Every successful
// relocation is recorded in `moves` and applied to block metadata
// immediately. Always returns VK_SUCCESS; stops early when moving one more
// allocation would exceed maxBytesToMove or maxAllocationsToMove, or when
// there is nothing left to move.
VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
    VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    if(m_Blocks.empty())
    {
        return VK_SUCCESS;
    }

    // This is a choice based on research.
    // Option 1:
    uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
    // Option 2:
    //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT;
    // Option 3:
    //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT;

    size_t srcBlockMinIndex = 0;
    // When FAST_ALGORITHM, move allocations from only last out of blocks that contain non-movable allocations.
    /*
    if(m_AlgorithmFlags & VMA_DEFRAGMENTATION_FAST_ALGORITHM_BIT)
    {
        const size_t blocksWithNonMovableCount = CalcBlocksWithNonMovableCount();
        if(blocksWithNonMovableCount > 0)
        {
            srcBlockMinIndex = blocksWithNonMovableCount - 1;
        }
    }
    */

    size_t srcBlockIndex = m_Blocks.size() - 1;
    // SIZE_MAX acts as a "not selected yet" sentinel for srcAllocIndex.
    size_t srcAllocIndex = SIZE_MAX;
    for(;;)
    {
        // 1. Find next allocation to move.
        // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
        // 1.2. Then start from last to first m_Allocations.
        while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
        {
            if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
            {
                // Finished: no more allocations to process.
                if(srcBlockIndex == srcBlockMinIndex)
                {
                    return VK_SUCCESS;
                }
                else
                {
                    --srcBlockIndex;
                    srcAllocIndex = SIZE_MAX;
                }
            }
            else
            {
                srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
            }
        }

        BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
        AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];

        const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
        const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
        const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
        const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();

        // 2. Try to find new place for this allocation in preceding or current block.
        for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
        {
            BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
            VmaAllocationRequest dstAllocRequest;
            // A candidate destination must both fit the allocation and
            // actually improve its position (MoveMakesSense).
            if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
                m_CurrentFrameIndex,
                m_pBlockVector->GetFrameInUseCount(),
                m_pBlockVector->GetBufferImageGranularity(),
                size,
                alignment,
                false, // upperAddress
                suballocType,
                false, // canMakeOtherLost
                strategy,
                &dstAllocRequest) &&
            MoveMakesSense(
                dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
            {
                VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);

                // Reached limit on number of allocations or bytes to move.
                if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
                    (m_BytesMoved + size > maxBytesToMove))
                {
                    return VK_SUCCESS;
                }

                VmaDefragmentationMove move;
                // Moves reference original (pre-sort) block indices.
                move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex;
                move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex;
                move.srcOffset = srcOffset;
                move.dstOffset = dstAllocRequest.offset;
                move.size = size;
                moves.push_back(move);

                // Update metadata right away: allocate at destination, free at source.
                pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
                    dstAllocRequest,
                    suballocType,
                    size,
                    allocInfo.m_hAllocation);
                pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);

                allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);

                // Report the move back to the caller if requested.
                if(allocInfo.m_pChanged != VMA_NULL)
                {
                    *allocInfo.m_pChanged = VK_TRUE;
                }

                ++m_AllocationsMoved;
                m_BytesMoved += size;

                VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);

                break;
            }
        }

        // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.

        if(srcAllocIndex > 0)
        {
            --srcAllocIndex;
        }
        else
        {
            if(srcBlockIndex > 0)
            {
                --srcBlockIndex;
                srcAllocIndex = SIZE_MAX;
            }
            else
            {
                return VK_SUCCESS;
            }
        }
    }
}
    12874 
    12875 size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const
    12876 {
    12877  size_t result = 0;
    12878  for(size_t i = 0; i < m_Blocks.size(); ++i)
    12879  {
    12880  if(m_Blocks[i]->m_HasNonMovableAllocations)
    12881  {
    12882  ++result;
    12883  }
    12884  }
    12885  return result;
    12886 }
    12887 
// Computes the list of moves that defragments this block vector and appends
// them to `moves`, respecting maxBytesToMove / maxAllocationsToMove.
// Only metadata is updated here - the actual data transfer is performed by
// the caller based on the returned moves.
VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
    VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    // Nothing registered and not defragmenting everything - nothing to do.
    if(!m_AllAllocations && m_AllocationCount == 0)
    {
        return VK_SUCCESS;
    }

    const size_t blockCount = m_Blocks.size();
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = m_Blocks[blockIndex];

        // When defragmenting everything, collect all non-free suballocations now.
        if(m_AllAllocations)
        {
            VmaBlockMetadata_Generic* pMetadata = (VmaBlockMetadata_Generic*)pBlockInfo->m_pBlock->m_pMetadata;
            for(VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin();
                it != pMetadata->m_Suballocations.end();
                ++it)
            {
                if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
                {
                    AllocationInfo allocInfo = AllocationInfo(it->hAllocation, VMA_NULL);
                    pBlockInfo->m_Allocations.push_back(allocInfo);
                }
            }
        }

        pBlockInfo->CalcHasNonMovableAllocations();

        // This is a choice based on research.
        // Option 1:
        pBlockInfo->SortAllocationsByOffsetDescending();
        // Option 2:
        //pBlockInfo->SortAllocationsBySizeDescending();
    }

    // Sort m_Blocks this time by the main criterium, from most "destination" to most "source" blocks.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());

    // This is a choice based on research.
    const uint32_t roundCount = 2;

    // Execute defragmentation rounds (the main part).
    VkResult result = VK_SUCCESS;
    for(uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round)
    {
        result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove);
    }

    return result;
}
    12942 
    12943 bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense(
    12944  size_t dstBlockIndex, VkDeviceSize dstOffset,
    12945  size_t srcBlockIndex, VkDeviceSize srcOffset)
    12946 {
    12947  if(dstBlockIndex < srcBlockIndex)
    12948  {
    12949  return true;
    12950  }
    12951  if(dstBlockIndex > srcBlockIndex)
    12952  {
    12953  return false;
    12954  }
    12955  if(dstOffset < srcOffset)
    12956  {
    12957  return true;
    12958  }
    12959  return false;
    12960 }
    12961 
    12963 // VmaDefragmentationAlgorithm_Fast
    12964 
/*
Constructs the "fast" defragmentation algorithm for the given block vector.
overlappingMoveSupported: whether a copy whose source and destination ranges
overlap within the same block is allowed (memmove()-style transfer).
*/
VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast(
    VmaAllocator hAllocator,
    VmaBlockVector* pBlockVector,
    uint32_t currentFrameIndex,
    bool overlappingMoveSupported) :
    VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
    m_OverlappingMoveSupported(overlappingMoveSupported),
    m_AllocationCount(0),
    m_AllAllocations(false),
    m_BytesMoved(0),
    m_AllocationsMoved(0),
    m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks()))
{
    // The fast algorithm assumes suballocations are packed with no debug
    // margin between them.
    VMA_ASSERT(VMA_DEBUG_MARGIN == 0);

}
    12981 
VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast()
{
    // Nothing to release explicitly - members clean themselves up.
}
    12985 
/*
Runs the "fast" defragmentation algorithm: a single linear pass that compacts
allocations towards the front of the most "destination"-like blocks.

Preconditions: every allocation of the block vector participates (asserted
below), and PreprocessMetadata() is called here first so each block's
suballocation list holds used entries only, sorted by offset.

- moves: output - one record per performed allocation move.
- maxBytesToMove / maxAllocationsToMove: hard limits; the pass stops early
  when either would be exceeded.
Always returns VK_SUCCESS.
*/
VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
    VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    // The fast algorithm only works when all allocations take part.
    VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount);

    const size_t blockCount = m_pBlockVector->GetBlockCount();
    if(blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0)
    {
        return VK_SUCCESS;
    }

    PreprocessMetadata();

    // Sort blocks in order from most destination.

    m_BlockInfos.resize(blockCount);
    for(size_t i = 0; i < blockCount; ++i)
    {
        m_BlockInfos[i].origBlockIndex = i;
    }

    // Least free space first: the fullest blocks make the best destinations.
    VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo& lhs, const BlockInfo& rhs) -> bool {
        return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() <
            m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize();
    });

    // THE MAIN ALGORITHM

    // Remembers gaps skipped over in destination blocks so later (smaller)
    // allocations can still be packed into them.
    FreeSpaceDatabase freeSpaceDb;

    // Write cursor: block and offset where the next allocation will land.
    size_t dstBlockInfoIndex = 0;
    size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
    VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
    VmaBlockMetadata_Generic* pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
    VkDeviceSize dstBlockSize = pDstMetadata->GetSize();
    VkDeviceSize dstOffset = 0;

    bool end = false;
    for(size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex)
    {
        const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex;
        VmaDeviceMemoryBlock* const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex);
        VmaBlockMetadata_Generic* const pSrcMetadata = (VmaBlockMetadata_Generic*)pSrcBlock->m_pMetadata;
        for(VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin();
            !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); )
        {
            VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation;
            const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment();
            const VkDeviceSize srcAllocSize = srcSuballocIt->size;
            // Stop as soon as either limit would be exceeded by this move.
            if(m_AllocationsMoved == maxAllocationsToMove ||
                m_BytesMoved + srcAllocSize > maxBytesToMove)
            {
                end = true;
                break;
            }
            const VkDeviceSize srcAllocOffset = srcSuballocIt->offset;

            // Try to place it in one of free spaces from the database.
            size_t freeSpaceInfoIndex;
            VkDeviceSize dstAllocOffset;
            if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize,
                freeSpaceInfoIndex, dstAllocOffset))
            {
                size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
                VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
                VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata;

                // Same block
                if(freeSpaceInfoIndex == srcBlockInfoIndex)
                {
                    VMA_ASSERT(dstAllocOffset <= srcAllocOffset);

                    // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.

                    VmaSuballocation suballoc = *srcSuballocIt;
                    suballoc.offset = dstAllocOffset;
                    suballoc.hAllocation->ChangeOffset(dstAllocOffset);
                    m_BytesMoved += srcAllocSize;
                    ++m_AllocationsMoved;

                    // Re-insert the suballocation at its new position so the
                    // offset-sorted list stays sorted.
                    VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
                    ++nextSuballocIt;
                    pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
                    srcSuballocIt = nextSuballocIt;

                    InsertSuballoc(pFreeSpaceMetadata, suballoc);

                    VmaDefragmentationMove move = {
                        srcOrigBlockIndex, freeSpaceOrigBlockIndex,
                        srcAllocOffset, dstAllocOffset,
                        srcAllocSize };
                    moves.push_back(move);
                }
                // Different block
                else
                {
                    // MOVE OPTION 2: Move the allocation to a different block.

                    // Free-space destinations always lie in more "destination"
                    // blocks than the current source.
                    VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex);

                    VmaSuballocation suballoc = *srcSuballocIt;
                    suballoc.offset = dstAllocOffset;
                    suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset);
                    m_BytesMoved += srcAllocSize;
                    ++m_AllocationsMoved;

                    VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
                    ++nextSuballocIt;
                    pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
                    srcSuballocIt = nextSuballocIt;

                    InsertSuballoc(pFreeSpaceMetadata, suballoc);

                    VmaDefragmentationMove move = {
                        srcOrigBlockIndex, freeSpaceOrigBlockIndex,
                        srcAllocOffset, dstAllocOffset,
                        srcAllocSize };
                    moves.push_back(move);
                }
            }
            else
            {
                dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment);

                // If the allocation doesn't fit before the end of dstBlock, forward to next block.
                while(dstBlockInfoIndex < srcBlockInfoIndex &&
                    dstAllocOffset + srcAllocSize > dstBlockSize)
                {
                    // But before that, register remaining free space at the end of dst block.
                    freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset);

                    ++dstBlockInfoIndex;
                    dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
                    pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
                    pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
                    dstBlockSize = pDstMetadata->GetSize();
                    dstOffset = 0;
                    dstAllocOffset = 0;
                }

                // Same block
                if(dstBlockInfoIndex == srcBlockInfoIndex)
                {
                    VMA_ASSERT(dstAllocOffset <= srcAllocOffset);

                    // Would the destination range overlap the source range?
                    const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset;

                    bool skipOver = overlap;
                    if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset)
                    {
                        // If destination and source place overlap, skip if it would move it
                        // by only < 1/64 of its size.
                        skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize;
                    }

                    if(skipOver)
                    {
                        // Leave the allocation in place; remember the gap in
                        // front of it for possible later reuse.
                        freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset);

                        dstOffset = srcAllocOffset + srcAllocSize;
                        ++srcSuballocIt;
                    }
                    // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
                    else
                    {
                        srcSuballocIt->offset = dstAllocOffset;
                        srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset);
                        dstOffset = dstAllocOffset + srcAllocSize;
                        m_BytesMoved += srcAllocSize;
                        ++m_AllocationsMoved;
                        ++srcSuballocIt;
                        VmaDefragmentationMove move = {
                            srcOrigBlockIndex, dstOrigBlockIndex,
                            srcAllocOffset, dstAllocOffset,
                            srcAllocSize };
                        moves.push_back(move);
                    }
                }
                // Different block
                else
                {
                    // MOVE OPTION 2: Move the allocation to a different block.

                    VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex);
                    VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize);

                    VmaSuballocation suballoc = *srcSuballocIt;
                    suballoc.offset = dstAllocOffset;
                    suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset);
                    dstOffset = dstAllocOffset + srcAllocSize;
                    m_BytesMoved += srcAllocSize;
                    ++m_AllocationsMoved;

                    VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
                    ++nextSuballocIt;
                    pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
                    srcSuballocIt = nextSuballocIt;

                    // Destination list stays sorted because dstOffset only grows.
                    pDstMetadata->m_Suballocations.push_back(suballoc);

                    VmaDefragmentationMove move = {
                        srcOrigBlockIndex, dstOrigBlockIndex,
                        srcAllocOffset, dstAllocOffset,
                        srcAllocSize };
                    moves.push_back(move);
                }
            }
        }
    }

    m_BlockInfos.clear();

    PostprocessMetadata();

    return VK_SUCCESS;
}
    13204 
    13205 void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata()
    13206 {
    13207  const size_t blockCount = m_pBlockVector->GetBlockCount();
    13208  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    13209  {
    13210  VmaBlockMetadata_Generic* const pMetadata =
    13211  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
    13212  pMetadata->m_FreeCount = 0;
    13213  pMetadata->m_SumFreeSize = pMetadata->GetSize();
    13214  pMetadata->m_FreeSuballocationsBySize.clear();
    13215  for(VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
    13216  it != pMetadata->m_Suballocations.end(); )
    13217  {
    13218  if(it->type == VMA_SUBALLOCATION_TYPE_FREE)
    13219  {
    13220  VmaSuballocationList::iterator nextIt = it;
    13221  ++nextIt;
    13222  pMetadata->m_Suballocations.erase(it);
    13223  it = nextIt;
    13224  }
    13225  else
    13226  {
    13227  ++it;
    13228  }
    13229  }
    13230  }
    13231 }
    13232 
    13233 void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata()
    13234 {
    13235  const size_t blockCount = m_pBlockVector->GetBlockCount();
    13236  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    13237  {
    13238  VmaBlockMetadata_Generic* const pMetadata =
    13239  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
    13240  const VkDeviceSize blockSize = pMetadata->GetSize();
    13241 
    13242  // No allocations in this block - entire area is free.
    13243  if(pMetadata->m_Suballocations.empty())
    13244  {
    13245  pMetadata->m_FreeCount = 1;
    13246  //pMetadata->m_SumFreeSize is already set to blockSize.
    13247  VmaSuballocation suballoc = {
    13248  0, // offset
    13249  blockSize, // size
    13250  VMA_NULL, // hAllocation
    13251  VMA_SUBALLOCATION_TYPE_FREE };
    13252  pMetadata->m_Suballocations.push_back(suballoc);
    13253  pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin());
    13254  }
    13255  // There are some allocations in this block.
    13256  else
    13257  {
    13258  VkDeviceSize offset = 0;
    13259  VmaSuballocationList::iterator it;
    13260  for(it = pMetadata->m_Suballocations.begin();
    13261  it != pMetadata->m_Suballocations.end();
    13262  ++it)
    13263  {
    13264  VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE);
    13265  VMA_ASSERT(it->offset >= offset);
    13266 
    13267  // Need to insert preceding free space.
    13268  if(it->offset > offset)
    13269  {
    13270  ++pMetadata->m_FreeCount;
    13271  const VkDeviceSize freeSize = it->offset - offset;
    13272  VmaSuballocation suballoc = {
    13273  offset, // offset
    13274  freeSize, // size
    13275  VMA_NULL, // hAllocation
    13276  VMA_SUBALLOCATION_TYPE_FREE };
    13277  VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
    13278  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    13279  {
    13280  pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt);
    13281  }
    13282  }
    13283 
    13284  pMetadata->m_SumFreeSize -= it->size;
    13285  offset = it->offset + it->size;
    13286  }
    13287 
    13288  // Need to insert trailing free space.
    13289  if(offset < blockSize)
    13290  {
    13291  ++pMetadata->m_FreeCount;
    13292  const VkDeviceSize freeSize = blockSize - offset;
    13293  VmaSuballocation suballoc = {
    13294  offset, // offset
    13295  freeSize, // size
    13296  VMA_NULL, // hAllocation
    13297  VMA_SUBALLOCATION_TYPE_FREE };
    13298  VMA_ASSERT(it == pMetadata->m_Suballocations.end());
    13299  VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
    13300  if(freeSize > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    13301  {
    13302  pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt);
    13303  }
    13304  }
    13305 
    13306  VMA_SORT(
    13307  pMetadata->m_FreeSuballocationsBySize.begin(),
    13308  pMetadata->m_FreeSuballocationsBySize.end(),
    13309  VmaSuballocationItemSizeLess());
    13310  }
    13311 
    13312  VMA_HEAVY_ASSERT(pMetadata->Validate());
    13313  }
    13314 }
    13315 
    13316 void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc)
    13317 {
    13318  // TODO: Optimize somehow. Remember iterator instead of searching for it linearly.
    13319  VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
    13320  while(it != pMetadata->m_Suballocations.end())
    13321  {
    13322  if(it->offset < suballoc.offset)
    13323  {
    13324  ++it;
    13325  }
    13326  }
    13327  pMetadata->m_Suballocations.insert(it, suballoc);
    13328 }
    13329 
    13331 // VmaBlockVectorDefragmentationContext
    13332 
/*
Creates a defragmentation context for a single block vector.
hCustomPool: the owning custom pool, or VMA_NULL when pBlockVector belongs to
a default (per-memory-type) pool. The algorithm object itself is created
later, in Begin().
*/
VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
    VmaAllocator hAllocator,
    VmaPool hCustomPool,
    VmaBlockVector* pBlockVector,
    uint32_t currFrameIndex) :
    res(VK_SUCCESS),
    mutexLocked(false),
    blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
    m_hAllocator(hAllocator),
    m_hCustomPool(hCustomPool),
    m_pBlockVector(pBlockVector),
    m_CurrFrameIndex(currFrameIndex),
    m_pAlgorithm(VMA_NULL),
    m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
    m_AllAllocations(false)
{
}
    13350 
VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext()
{
    // Destroys the algorithm object created in Begin(); vma_delete on
    // VMA_NULL is a no-op if Begin() was never called.
    vma_delete(m_hAllocator, m_pAlgorithm);
}
    13355 
    13356 void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
    13357 {
    13358  AllocInfo info = { hAlloc, pChanged };
    13359  m_Allocations.push_back(info);
    13360 }
    13361 
    13362 void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported)
    13363 {
    13364  const bool allAllocations = m_AllAllocations ||
    13365  m_Allocations.size() == m_pBlockVector->CalcAllocationCount();
    13366 
    13367  /********************************
    13368  HERE IS THE CHOICE OF DEFRAGMENTATION ALGORITHM.
    13369  ********************************/
    13370 
    13371  /*
    13372  Fast algorithm is supported only when certain criteria are met:
    13373  - VMA_DEBUG_MARGIN is 0.
    13374  - All allocations in this block vector are moveable.
    13375  - There is no possibility of image/buffer granularity conflict.
    13376  */
    13377  if(VMA_DEBUG_MARGIN == 0 &&
    13378  allAllocations &&
    13379  !m_pBlockVector->IsBufferImageGranularityConflictPossible())
    13380  {
    13381  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)(
    13382  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
    13383  }
    13384  else
    13385  {
    13386  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)(
    13387  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
    13388  }
    13389 
    13390  if(allAllocations)
    13391  {
    13392  m_pAlgorithm->AddAll();
    13393  }
    13394  else
    13395  {
    13396  for(size_t i = 0, count = m_Allocations.size(); i < count; ++i)
    13397  {
    13398  m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged);
    13399  }
    13400  }
    13401 }
    13402 
    13404 // VmaDefragmentationContext
    13405 
/*
Creates a top-level defragmentation context.
pStats may be null; when provided, statistics are accumulated into it by
Defragment() and while per-pool contexts are torn down in the destructor.
*/
VmaDefragmentationContext_T::VmaDefragmentationContext_T(
    VmaAllocator hAllocator,
    uint32_t currFrameIndex,
    uint32_t flags,
    VmaDefragmentationStats* pStats) :
    m_hAllocator(hAllocator),
    m_CurrFrameIndex(currFrameIndex),
    m_Flags(flags),
    m_pStats(pStats),
    m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks()))
{
    // Per-memory-type contexts are created lazily in AddAllocations();
    // null means "no context for this memory type yet".
    memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts));
}
    13419 
    13420 VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
    13421 {
    13422  for(size_t i = m_CustomPoolContexts.size(); i--; )
    13423  {
    13424  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
    13425  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
    13426  vma_delete(m_hAllocator, pBlockVectorCtx);
    13427  }
    13428  for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
    13429  {
    13430  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
    13431  if(pBlockVectorCtx)
    13432  {
    13433  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
    13434  vma_delete(m_hAllocator, pBlockVectorCtx);
    13435  }
    13436  }
    13437 }
    13438 
    13439 void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, VmaPool* pPools)
    13440 {
    13441  for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
    13442  {
    13443  VmaPool pool = pPools[poolIndex];
    13444  VMA_ASSERT(pool);
    13445  // Pools with algorithm other than default are not defragmented.
    13446  if(pool->m_BlockVector.GetAlgorithm() == 0)
    13447  {
    13448  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
    13449 
    13450  for(size_t i = m_CustomPoolContexts.size(); i--; )
    13451  {
    13452  if(m_CustomPoolContexts[i]->GetCustomPool() == pool)
    13453  {
    13454  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
    13455  break;
    13456  }
    13457  }
    13458 
    13459  if(!pBlockVectorDefragCtx)
    13460  {
    13461  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
    13462  m_hAllocator,
    13463  pool,
    13464  &pool->m_BlockVector,
    13465  m_CurrFrameIndex);
    13466  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
    13467  }
    13468 
    13469  pBlockVectorDefragCtx->AddAll();
    13470  }
    13471  }
    13472 }
    13473 
/*
Dispatches the given allocations among per-block-vector defragmentation
contexts, creating those contexts lazily. Dedicated and lost allocations are
skipped, as are allocations from custom pools that use a non-default
algorithm.

- pAllocationsChanged: optional array parallel to pAllocations; element i is
  handed to the context so it can later report whether allocation i moved.
*/
void VmaDefragmentationContext_T::AddAllocations(
    uint32_t allocationCount,
    VmaAllocation* pAllocations,
    VkBool32* pAllocationsChanged)
{
    // Dispatch pAllocations among defragmentators. Create them when necessary.
    for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    {
        const VmaAllocation hAlloc = pAllocations[allocIndex];
        VMA_ASSERT(hAlloc);
        // DedicatedAlloc cannot be defragmented.
        if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
            // Lost allocation cannot be defragmented.
            (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
        {
            VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;

            const VmaPool hAllocPool = hAlloc->GetBlock()->GetParentPool();
            // This allocation belongs to custom pool.
            if(hAllocPool != VK_NULL_HANDLE)
            {
                // Pools with algorithm other than default are not defragmented.
                if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
                {
                    // Reuse an existing context for this pool if there is one.
                    for(size_t i = m_CustomPoolContexts.size(); i--; )
                    {
                        if(m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool)
                        {
                            pBlockVectorDefragCtx = m_CustomPoolContexts[i];
                            break;
                        }
                    }
                    if(!pBlockVectorDefragCtx)
                    {
                        pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
                            m_hAllocator,
                            hAllocPool,
                            &hAllocPool->m_BlockVector,
                            m_CurrFrameIndex);
                        m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
                    }
                }
            }
            // This allocation belongs to default pool.
            else
            {
                const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
                pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
                if(!pBlockVectorDefragCtx)
                {
                    pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
                        m_hAllocator,
                        VMA_NULL, // hCustomPool
                        m_hAllocator->m_pBlockVectors[memTypeIndex],
                        m_CurrFrameIndex);
                    m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx;
                }
            }

            // Still null for custom pools with a non-default algorithm -
            // such allocations are silently skipped.
            if(pBlockVectorDefragCtx)
            {
                VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
                    &pAllocationsChanged[allocIndex] : VMA_NULL;
                pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged);
            }
        }
    }
}
    13542 
    13543 VkResult VmaDefragmentationContext_T::Defragment(
    13544  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
    13545  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
    13546  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats)
    13547 {
    13548  if(pStats)
    13549  {
    13550  memset(pStats, 0, sizeof(VmaDefragmentationStats));
    13551  }
    13552 
    13553  if(commandBuffer == VK_NULL_HANDLE)
    13554  {
    13555  maxGpuBytesToMove = 0;
    13556  maxGpuAllocationsToMove = 0;
    13557  }
    13558 
    13559  VkResult res = VK_SUCCESS;
    13560 
    13561  // Process default pools.
    13562  for(uint32_t memTypeIndex = 0;
    13563  memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS;
    13564  ++memTypeIndex)
    13565  {
    13566  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
    13567  if(pBlockVectorCtx)
    13568  {
    13569  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
    13570  pBlockVectorCtx->GetBlockVector()->Defragment(
    13571  pBlockVectorCtx,
    13572  pStats,
    13573  maxCpuBytesToMove, maxCpuAllocationsToMove,
    13574  maxGpuBytesToMove, maxGpuAllocationsToMove,
    13575  commandBuffer);
    13576  if(pBlockVectorCtx->res != VK_SUCCESS)
    13577  {
    13578  res = pBlockVectorCtx->res;
    13579  }
    13580  }
    13581  }
    13582 
    13583  // Process custom pools.
    13584  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
    13585  customCtxIndex < customCtxCount && res >= VK_SUCCESS;
    13586  ++customCtxIndex)
    13587  {
    13588  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
    13589  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
    13590  pBlockVectorCtx->GetBlockVector()->Defragment(
    13591  pBlockVectorCtx,
    13592  pStats,
    13593  maxCpuBytesToMove, maxCpuAllocationsToMove,
    13594  maxGpuBytesToMove, maxGpuAllocationsToMove,
    13595  commandBuffer);
    13596  if(pBlockVectorCtx->res != VK_SUCCESS)
    13597  {
    13598  res = pBlockVectorCtx->res;
    13599  }
    13600  }
    13601 
    13602  return res;
    13603 }
    13604 
    13606 // VmaRecorder
    13607 
    13608 #if VMA_RECORDING_ENABLED
    13609 
// Members get inert defaults; the recorder becomes usable only after a
// successful Init().
VmaRecorder::VmaRecorder() :
    m_UseMutex(true),
    m_Flags(0),
    m_File(VMA_NULL),
    m_Freq(INT64_MAX),
    m_StartCounter(INT64_MAX)
{
}
    13618 
/*
Opens the recording file and writes the CSV header (file magic + format
version). Uses QueryPerformanceFrequency/-Counter for call timestamps and
fopen_s for file access — Windows-specific; this code is compiled only with
VMA_RECORDING_ENABLED. Returns VK_ERROR_INITIALIZATION_FAILED when the file
cannot be opened for writing.
*/
VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
{
    m_UseMutex = useMutex;
    m_Flags = settings.flags;

    // Capture timer frequency and the start counter; recorded timestamps are
    // reported relative to m_StartCounter.
    QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
    QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);

    // Open file for writing.
    errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
    if(err != 0)
    {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Write header.
    fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
    fprintf(m_File, "%s\n", "1,5");

    return VK_SUCCESS;
}
    13640 
VmaRecorder::~VmaRecorder()
{
    // Close the recording file if Init() succeeded.
    if(m_File != VMA_NULL)
    {
        fclose(m_File);
    }
}
    13648 
// Writes one CSV line recording a vmaCreateAllocator() call.
void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
    Flush();
}
    13658 
// Writes one CSV line recording a vmaDestroyAllocator() call.
void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
    Flush();
}
    13668 
// Writes one CSV line recording a vmaCreatePool() call: the create-info
// parameters followed by the resulting pool handle.
void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
        createInfo.memoryTypeIndex,
        createInfo.flags,
        createInfo.blockSize,
        (uint64_t)createInfo.minBlockCount,
        (uint64_t)createInfo.maxBlockCount,
        createInfo.frameInUseCount,
        pool);
    Flush();
}
    13685 
// Writes one CSV line recording a vmaDestroyPool() call.
void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
        pool);
    Flush();
}
    13696 
// Writes one CSV line recording a vmaAllocateMemory() call: memory
// requirements, allocation create-info, the resulting allocation handle and
// its (optionally string-valued) user data.
void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    13721 
// Writes one CSV line recording a vmaAllocateMemoryPages() call. The list of
// resulting allocation handles is appended via PrintPointerList(), followed
// by the user-data string.
void VmaRecorder::RecordAllocateMemoryPages(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    const VmaAllocationCreateInfo& createInfo,
    uint64_t allocationCount,
    const VmaAllocation* pAllocations)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryPages,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool);
    PrintPointerList(allocationCount, pAllocations);
    fprintf(m_File, ",%s\n", userDataStr.GetString());
    Flush();
}
    13747 
// Writes one CSV line recording a vmaAllocateMemoryForBuffer() call,
// including the dedicated-allocation hints queried from the buffer.
void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    bool requiresDedicatedAllocation,
    bool prefersDedicatedAllocation,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        requiresDedicatedAllocation ? 1 : 0,
        prefersDedicatedAllocation ? 1 : 0,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    13776 
    13777 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
    13778  const VkMemoryRequirements& vkMemReq,
    13779  bool requiresDedicatedAllocation,
    13780  bool prefersDedicatedAllocation,
    13781  const VmaAllocationCreateInfo& createInfo,
    13782  VmaAllocation allocation)
    13783 {
    13784  CallParams callParams;
    13785  GetBasicParams(callParams);
    13786 
    13787  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13788  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    13789  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    13790  vkMemReq.size,
    13791  vkMemReq.alignment,
    13792  vkMemReq.memoryTypeBits,
    13793  requiresDedicatedAllocation ? 1 : 0,
    13794  prefersDedicatedAllocation ? 1 : 0,
    13795  createInfo.flags,
    13796  createInfo.usage,
    13797  createInfo.requiredFlags,
    13798  createInfo.preferredFlags,
    13799  createInfo.memoryTypeBits,
    13800  createInfo.pool,
    13801  allocation,
    13802  userDataStr.GetString());
    13803  Flush();
    13804 }
    13805 
    13806 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
    13807  VmaAllocation allocation)
    13808 {
    13809  CallParams callParams;
    13810  GetBasicParams(callParams);
    13811 
    13812  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13813  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    13814  allocation);
    13815  Flush();
    13816 }
    13817 
    13818 void VmaRecorder::RecordFreeMemoryPages(uint32_t frameIndex,
    13819  uint64_t allocationCount,
    13820  const VmaAllocation* pAllocations)
    13821 {
    13822  CallParams callParams;
    13823  GetBasicParams(callParams);
    13824 
    13825  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13826  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemoryPages,", callParams.threadId, callParams.time, frameIndex);
    13827  PrintPointerList(allocationCount, pAllocations);
    13828  fprintf(m_File, "\n");
    13829  Flush();
    13830 }
    13831 
    13832 void VmaRecorder::RecordResizeAllocation(
    13833  uint32_t frameIndex,
    13834  VmaAllocation allocation,
    13835  VkDeviceSize newSize)
    13836 {
    13837  CallParams callParams;
    13838  GetBasicParams(callParams);
    13839 
    13840  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13841  fprintf(m_File, "%u,%.3f,%u,vmaResizeAllocation,%p,%llu\n", callParams.threadId, callParams.time, frameIndex,
    13842  allocation, newSize);
    13843  Flush();
    13844 }
    13845 
    13846 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
    13847  VmaAllocation allocation,
    13848  const void* pUserData)
    13849 {
    13850  CallParams callParams;
    13851  GetBasicParams(callParams);
    13852 
    13853  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13854  UserDataString userDataStr(
    13855  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
    13856  pUserData);
    13857  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    13858  allocation,
    13859  userDataStr.GetString());
    13860  Flush();
    13861 }
    13862 
    13863 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
    13864  VmaAllocation allocation)
    13865 {
    13866  CallParams callParams;
    13867  GetBasicParams(callParams);
    13868 
    13869  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13870  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
    13871  allocation);
    13872  Flush();
    13873 }
    13874 
    13875 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
    13876  VmaAllocation allocation)
    13877 {
    13878  CallParams callParams;
    13879  GetBasicParams(callParams);
    13880 
    13881  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13882  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    13883  allocation);
    13884  Flush();
    13885 }
    13886 
    13887 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
    13888  VmaAllocation allocation)
    13889 {
    13890  CallParams callParams;
    13891  GetBasicParams(callParams);
    13892 
    13893  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13894  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    13895  allocation);
    13896  Flush();
    13897 }
    13898 
    13899 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
    13900  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    13901 {
    13902  CallParams callParams;
    13903  GetBasicParams(callParams);
    13904 
    13905  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13906  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
    13907  allocation,
    13908  offset,
    13909  size);
    13910  Flush();
    13911 }
    13912 
    13913 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
    13914  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    13915 {
    13916  CallParams callParams;
    13917  GetBasicParams(callParams);
    13918 
    13919  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13920  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
    13921  allocation,
    13922  offset,
    13923  size);
    13924  Flush();
    13925 }
    13926 
    13927 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
    13928  const VkBufferCreateInfo& bufCreateInfo,
    13929  const VmaAllocationCreateInfo& allocCreateInfo,
    13930  VmaAllocation allocation)
    13931 {
    13932  CallParams callParams;
    13933  GetBasicParams(callParams);
    13934 
    13935  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13936  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    13937  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    13938  bufCreateInfo.flags,
    13939  bufCreateInfo.size,
    13940  bufCreateInfo.usage,
    13941  bufCreateInfo.sharingMode,
    13942  allocCreateInfo.flags,
    13943  allocCreateInfo.usage,
    13944  allocCreateInfo.requiredFlags,
    13945  allocCreateInfo.preferredFlags,
    13946  allocCreateInfo.memoryTypeBits,
    13947  allocCreateInfo.pool,
    13948  allocation,
    13949  userDataStr.GetString());
    13950  Flush();
    13951 }
    13952 
    13953 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
    13954  const VkImageCreateInfo& imageCreateInfo,
    13955  const VmaAllocationCreateInfo& allocCreateInfo,
    13956  VmaAllocation allocation)
    13957 {
    13958  CallParams callParams;
    13959  GetBasicParams(callParams);
    13960 
    13961  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13962  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    13963  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    13964  imageCreateInfo.flags,
    13965  imageCreateInfo.imageType,
    13966  imageCreateInfo.format,
    13967  imageCreateInfo.extent.width,
    13968  imageCreateInfo.extent.height,
    13969  imageCreateInfo.extent.depth,
    13970  imageCreateInfo.mipLevels,
    13971  imageCreateInfo.arrayLayers,
    13972  imageCreateInfo.samples,
    13973  imageCreateInfo.tiling,
    13974  imageCreateInfo.usage,
    13975  imageCreateInfo.sharingMode,
    13976  imageCreateInfo.initialLayout,
    13977  allocCreateInfo.flags,
    13978  allocCreateInfo.usage,
    13979  allocCreateInfo.requiredFlags,
    13980  allocCreateInfo.preferredFlags,
    13981  allocCreateInfo.memoryTypeBits,
    13982  allocCreateInfo.pool,
    13983  allocation,
    13984  userDataStr.GetString());
    13985  Flush();
    13986 }
    13987 
    13988 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
    13989  VmaAllocation allocation)
    13990 {
    13991  CallParams callParams;
    13992  GetBasicParams(callParams);
    13993 
    13994  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13995  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
    13996  allocation);
    13997  Flush();
    13998 }
    13999 
    14000 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
    14001  VmaAllocation allocation)
    14002 {
    14003  CallParams callParams;
    14004  GetBasicParams(callParams);
    14005 
    14006  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    14007  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
    14008  allocation);
    14009  Flush();
    14010 }
    14011 
    14012 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
    14013  VmaAllocation allocation)
    14014 {
    14015  CallParams callParams;
    14016  GetBasicParams(callParams);
    14017 
    14018  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    14019  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
    14020  allocation);
    14021  Flush();
    14022 }
    14023 
    14024 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
    14025  VmaAllocation allocation)
    14026 {
    14027  CallParams callParams;
    14028  GetBasicParams(callParams);
    14029 
    14030  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    14031  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
    14032  allocation);
    14033  Flush();
    14034 }
    14035 
    14036 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
    14037  VmaPool pool)
    14038 {
    14039  CallParams callParams;
    14040  GetBasicParams(callParams);
    14041 
    14042  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    14043  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
    14044  pool);
    14045  Flush();
    14046 }
    14047 
    14048 void VmaRecorder::RecordDefragmentationBegin(uint32_t frameIndex,
    14049  const VmaDefragmentationInfo2& info,
    14051 {
    14052  CallParams callParams;
    14053  GetBasicParams(callParams);
    14054 
    14055  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    14056  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationBegin,%u,", callParams.threadId, callParams.time, frameIndex,
    14057  info.flags);
    14058  PrintPointerList(info.allocationCount, info.pAllocations);
    14059  fprintf(m_File, ",");
    14060  PrintPointerList(info.poolCount, info.pPools);
    14061  fprintf(m_File, ",%llu,%u,%llu,%u,%p,%p\n",
    14062  info.maxCpuBytesToMove,
    14064  info.maxGpuBytesToMove,
    14066  info.commandBuffer,
    14067  ctx);
    14068  Flush();
    14069 }
    14070 
    14071 void VmaRecorder::RecordDefragmentationEnd(uint32_t frameIndex,
    14073 {
    14074  CallParams callParams;
    14075  GetBasicParams(callParams);
    14076 
    14077  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    14078  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationEnd,%p\n", callParams.threadId, callParams.time, frameIndex,
    14079  ctx);
    14080  Flush();
    14081 }
    14082 
    14083 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
    14084 {
    14085  if(pUserData != VMA_NULL)
    14086  {
    14087  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
    14088  {
    14089  m_Str = (const char*)pUserData;
    14090  }
    14091  else
    14092  {
    14093  sprintf_s(m_PtrStr, "%p", pUserData);
    14094  m_Str = m_PtrStr;
    14095  }
    14096  }
    14097  else
    14098  {
    14099  m_Str = "";
    14100  }
    14101 }
    14102 
// Writes the "Config,Begin" ... "Config,End" header section of the recording
// file: physical-device identity and limits, memory heaps/types, enabled
// extensions, and the compile-time VMA_* macro values in effect. Not mutex-
// protected — callers are expected to invoke this before recording starts.
void VmaRecorder::WriteConfiguration(
    const VkPhysicalDeviceProperties& devProps,
    const VkPhysicalDeviceMemoryProperties& memProps,
    bool dedicatedAllocationExtensionEnabled)
{
    fprintf(m_File, "Config,Begin\n");

    fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
    fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
    fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
    fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
    fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
    fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);

    // Limits that affect how allocations are placed and aligned.
    fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
    fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
    fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);

    // One line per heap (size, flags) and per memory type (heap, flags).
    fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
    for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
    }
    fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
    for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
    }

    fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);

    // Compile-time configuration, so a replay tool can detect mismatches.
    fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
    fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
    fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
    fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
    fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);

    fprintf(m_File, "Config,End\n");
}
    14148 
    14149 void VmaRecorder::GetBasicParams(CallParams& outParams)
    14150 {
    14151  outParams.threadId = GetCurrentThreadId();
    14152 
    14153  LARGE_INTEGER counter;
    14154  QueryPerformanceCounter(&counter);
    14155  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
    14156 }
    14157 
    14158 void VmaRecorder::PrintPointerList(uint64_t count, const VmaAllocation* pItems)
    14159 {
    14160  if(count)
    14161  {
    14162  fprintf(m_File, "%p", pItems[0]);
    14163  for(uint64_t i = 1; i < count; ++i)
    14164  {
    14165  fprintf(m_File, " %p", pItems[i]);
    14166  }
    14167  }
    14168 }
    14169 
    14170 void VmaRecorder::Flush()
    14171 {
    14172  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
    14173  {
    14174  fflush(m_File);
    14175  }
    14176 }
    14177 
    14178 #endif // #if VMA_RECORDING_ENABLED
    14179 
    14181 // VmaAllocationObjectAllocator
    14182 
// Forwards the user's allocation callbacks to the underlying pooled
// allocator. The 1024 appears to be the pool's initial block capacity —
// TODO confirm against the pooled allocator's constructor signature.
VmaAllocationObjectAllocator::VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_Allocator(pAllocationCallbacks, 1024)
{
}
    14187 
    14188 VmaAllocation VmaAllocationObjectAllocator::Allocate()
    14189 {
    14190  VmaMutexLock mutexLock(m_Mutex);
    14191  return m_Allocator.Alloc();
    14192 }
    14193 
    14194 void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc)
    14195 {
    14196  VmaMutexLock mutexLock(m_Mutex);
    14197  m_Allocator.Free(hAlloc);
    14198 }
    14199 
    14201 // VmaAllocator_T
    14202 
    14203 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
    14204  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
    14205  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
    14206  m_hDevice(pCreateInfo->device),
    14207  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
    14208  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
    14209  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
    14210  m_AllocationObjectAllocator(&m_AllocationCallbacks),
    14211  m_PreferredLargeHeapBlockSize(0),
    14212  m_PhysicalDevice(pCreateInfo->physicalDevice),
    14213  m_CurrentFrameIndex(0),
    14214  m_GpuDefragmentationMemoryTypeBits(UINT32_MAX),
    14215  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
    14216  m_NextPoolId(0)
    14218  ,m_pRecorder(VMA_NULL)
    14219 #endif
    14220 {
    14221  if(VMA_DEBUG_DETECT_CORRUPTION)
    14222  {
    14223  // Needs to be multiply of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
    14224  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
    14225  }
    14226 
    14227  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
    14228 
    14229 #if !(VMA_DEDICATED_ALLOCATION)
    14231  {
    14232  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
    14233  }
    14234 #endif
    14235 
    14236  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
    14237  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
    14238  memset(&m_MemProps, 0, sizeof(m_MemProps));
    14239 
    14240  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
    14241  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
    14242 
    14243  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    14244  {
    14245  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
    14246  }
    14247 
    14248  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
    14249  {
    14250  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
    14251  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
    14252  }
    14253 
    14254  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
    14255 
    14256  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
    14257  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
    14258 
    14259  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
    14260  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
    14261  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
    14262  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
    14263 
    14264  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
    14265  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
    14266 
    14267  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
    14268  {
    14269  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
    14270  {
    14271  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
    14272  if(limit != VK_WHOLE_SIZE)
    14273  {
    14274  m_HeapSizeLimit[heapIndex] = limit;
    14275  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
    14276  {
    14277  m_MemProps.memoryHeaps[heapIndex].size = limit;
    14278  }
    14279  }
    14280  }
    14281  }
    14282 
    14283  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    14284  {
    14285  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
    14286 
    14287  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
    14288  this,
    14289  VK_NULL_HANDLE, // hParentPool
    14290  memTypeIndex,
    14291  preferredBlockSize,
    14292  0,
    14293  SIZE_MAX,
    14294  GetBufferImageGranularity(),
    14295  pCreateInfo->frameInUseCount,
    14296  false, // isCustomPool
    14297  false, // explicitBlockSize
    14298  false); // linearAlgorithm
    14299  // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
    14300  // becase minBlockCount is 0.
    14301  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
    14302 
    14303  }
    14304 }
    14305 
    14306 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
    14307 {
    14308  VkResult res = VK_SUCCESS;
    14309 
    14310  if(pCreateInfo->pRecordSettings != VMA_NULL &&
    14311  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
    14312  {
    14313 #if VMA_RECORDING_ENABLED
    14314  m_pRecorder = vma_new(this, VmaRecorder)();
    14315  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
    14316  if(res != VK_SUCCESS)
    14317  {
    14318  return res;
    14319  }
    14320  m_pRecorder->WriteConfiguration(
    14321  m_PhysicalDeviceProperties,
    14322  m_MemProps,
    14323  m_UseKhrDedicatedAllocation);
    14324  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
    14325 #else
    14326  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
    14327  return VK_ERROR_FEATURE_NOT_PRESENT;
    14328 #endif
    14329  }
    14330 
    14331  return res;
    14332 }
    14333 
    14334 VmaAllocator_T::~VmaAllocator_T()
    14335 {
    14336 #if VMA_RECORDING_ENABLED
    14337  if(m_pRecorder != VMA_NULL)
    14338  {
    14339  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
    14340  vma_delete(this, m_pRecorder);
    14341  }
    14342 #endif
    14343 
    14344  VMA_ASSERT(m_Pools.empty());
    14345 
    14346  for(size_t i = GetMemoryTypeCount(); i--; )
    14347  {
    14348  if(m_pDedicatedAllocations[i] != VMA_NULL && !m_pDedicatedAllocations[i]->empty())
    14349  {
    14350  VMA_ASSERT(0 && "Unfreed dedicated allocations found.");
    14351  }
    14352 
    14353  vma_delete(this, m_pDedicatedAllocations[i]);
    14354  vma_delete(this, m_pBlockVectors[i]);
    14355  }
    14356 }
    14357 
// Populates m_VulkanFunctions in up to two passes:
//  1. When VMA_STATIC_VULKAN_FUNCTIONS == 1, take the statically linked
//     entry points (plus vkGetDeviceProcAddr lookups for the KHR dedicated-
//     allocation pair, which is extension-provided).
//  2. Overwrite any entry for which the caller supplied a non-null pointer
//     in pVulkanFunctions.
// Finishes by asserting that every required pointer ended up non-null.
void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
{
#if VMA_STATIC_VULKAN_FUNCTIONS == 1
    m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties;
    m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties;
    m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
    m_VulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkFreeMemory;
    m_VulkanFunctions.vkMapMemory = (PFN_vkMapMemory)vkMapMemory;
    m_VulkanFunctions.vkUnmapMemory = (PFN_vkUnmapMemory)vkUnmapMemory;
    m_VulkanFunctions.vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkFlushMappedMemoryRanges;
    m_VulkanFunctions.vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkInvalidateMappedMemoryRanges;
    m_VulkanFunctions.vkBindBufferMemory = (PFN_vkBindBufferMemory)vkBindBufferMemory;
    m_VulkanFunctions.vkBindImageMemory = (PFN_vkBindImageMemory)vkBindImageMemory;
    m_VulkanFunctions.vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetBufferMemoryRequirements;
    m_VulkanFunctions.vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetImageMemoryRequirements;
    m_VulkanFunctions.vkCreateBuffer = (PFN_vkCreateBuffer)vkCreateBuffer;
    m_VulkanFunctions.vkDestroyBuffer = (PFN_vkDestroyBuffer)vkDestroyBuffer;
    m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage;
    m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage;
    m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer;
#if VMA_DEDICATED_ALLOCATION
    // Extension entry points are not statically linked — fetch them from the
    // device, and only when the user actually enabled the extension.
    if(m_UseKhrDedicatedAllocation)
    {
        m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
            (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
        m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
            (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
    }
#endif // #if VMA_DEDICATED_ALLOCATION
#endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1

// Copies one user-supplied pointer over the default, skipping nulls.
#define VMA_COPY_IF_NOT_NULL(funcName) \
    if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;

    if(pVulkanFunctions != VMA_NULL)
    {
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
        VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
        VMA_COPY_IF_NOT_NULL(vkFreeMemory);
        VMA_COPY_IF_NOT_NULL(vkMapMemory);
        VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
        VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
        VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
        VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
        VMA_COPY_IF_NOT_NULL(vkCreateImage);
        VMA_COPY_IF_NOT_NULL(vkDestroyImage);
        VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
#if VMA_DEDICATED_ALLOCATION
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
#endif
    }

#undef VMA_COPY_IF_NOT_NULL

    // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
    // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
        VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
    }
#endif
}
    14446 
    14447 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
    14448 {
    14449  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    14450  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
    14451  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
    14452  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
    14453 }
    14454 
    14455 VkResult VmaAllocator_T::AllocateMemoryOfType(
    14456  VkDeviceSize size,
    14457  VkDeviceSize alignment,
    14458  bool dedicatedAllocation,
    14459  VkBuffer dedicatedBuffer,
    14460  VkImage dedicatedImage,
    14461  const VmaAllocationCreateInfo& createInfo,
    14462  uint32_t memTypeIndex,
    14463  VmaSuballocationType suballocType,
    14464  size_t allocationCount,
    14465  VmaAllocation* pAllocations)
    14466 {
    14467  VMA_ASSERT(pAllocations != VMA_NULL);
    14468  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);
    14469 
    14470  VmaAllocationCreateInfo finalCreateInfo = createInfo;
    14471 
    14472  // If memory type is not HOST_VISIBLE, disable MAPPED.
    14473  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    14474  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    14475  {
    14476  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
    14477  }
    14478 
    14479  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
    14480  VMA_ASSERT(blockVector);
    14481 
    14482  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
    14483  bool preferDedicatedMemory =
    14484  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
    14485  dedicatedAllocation ||
    14486  // Heuristics: Allocate dedicated memory if requested size if greater than half of preferred block size.
    14487  size > preferredBlockSize / 2;
    14488 
    14489  if(preferDedicatedMemory &&
    14490  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
    14491  finalCreateInfo.pool == VK_NULL_HANDLE)
    14492  {
    14494  }
    14495 
    14496  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
    14497  {
    14498  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    14499  {
    14500  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14501  }
    14502  else
    14503  {
    14504  return AllocateDedicatedMemory(
    14505  size,
    14506  suballocType,
    14507  memTypeIndex,
    14508  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    14509  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    14510  finalCreateInfo.pUserData,
    14511  dedicatedBuffer,
    14512  dedicatedImage,
    14513  allocationCount,
    14514  pAllocations);
    14515  }
    14516  }
    14517  else
    14518  {
    14519  VkResult res = blockVector->Allocate(
    14520  m_CurrentFrameIndex.load(),
    14521  size,
    14522  alignment,
    14523  finalCreateInfo,
    14524  suballocType,
    14525  allocationCount,
    14526  pAllocations);
    14527  if(res == VK_SUCCESS)
    14528  {
    14529  return res;
    14530  }
    14531 
    14532  // 5. Try dedicated memory.
    14533  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    14534  {
    14535  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14536  }
    14537  else
    14538  {
    14539  res = AllocateDedicatedMemory(
    14540  size,
    14541  suballocType,
    14542  memTypeIndex,
    14543  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    14544  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    14545  finalCreateInfo.pUserData,
    14546  dedicatedBuffer,
    14547  dedicatedImage,
    14548  allocationCount,
    14549  pAllocations);
    14550  if(res == VK_SUCCESS)
    14551  {
    14552  // Succeeded: AllocateDedicatedMemory function already filld pMemory, nothing more to do here.
    14553  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
    14554  return VK_SUCCESS;
    14555  }
    14556  else
    14557  {
    14558  // Everything failed: Return error code.
    14559  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    14560  return res;
    14561  }
    14562  }
    14563  }
    14564 }
    14565 
/*
Allocates `allocationCount` dedicated VkDeviceMemory blocks (one VkDeviceMemory
per allocation), each of `size` bytes, from memory type `memTypeIndex`.
All-or-nothing: on partial failure every page created so far is destroyed and
the output array is zeroed. On success, all allocations are registered in
m_pDedicatedAllocations[memTypeIndex].
*/
VkResult VmaAllocator_T::AllocateDedicatedMemory(
    VkDeviceSize size,
    VmaSuballocationType suballocType,
    uint32_t memTypeIndex,
    bool map,                 // persistently map each page via vkMapMemory
    bool isUserDataString,    // pUserData is a string to be copied, not an opaque pointer
    void* pUserData,
    VkBuffer dedicatedBuffer, // buffer this memory is dedicated to, or VK_NULL_HANDLE
    VkImage dedicatedImage,   // image this memory is dedicated to, or VK_NULL_HANDLE
    size_t allocationCount,
    VmaAllocation* pAllocations)
{
    VMA_ASSERT(allocationCount > 0 && pAllocations);

    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    allocInfo.memoryTypeIndex = memTypeIndex;
    allocInfo.allocationSize = size;

#if VMA_DEDICATED_ALLOCATION
    // When VK_KHR_dedicated_allocation is enabled, chain
    // VkMemoryDedicatedAllocateInfoKHR so the driver knows which single
    // buffer or image this memory will be bound to.
    VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
    if(m_UseKhrDedicatedAllocation)
    {
        if(dedicatedBuffer != VK_NULL_HANDLE)
        {
            // Buffer and image are mutually exclusive.
            VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
            dedicatedAllocInfo.buffer = dedicatedBuffer;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
        else if(dedicatedImage != VK_NULL_HANDLE)
        {
            dedicatedAllocInfo.image = dedicatedImage;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
    }
#endif // #if VMA_DEDICATED_ALLOCATION

    size_t allocIndex;
    VkResult res = VK_SUCCESS;
    for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    {
        res = AllocateDedicatedMemoryPage(
            size,
            suballocType,
            memTypeIndex,
            allocInfo,
            map,
            isUserDataString,
            pUserData,
            pAllocations + allocIndex);
        if(res != VK_SUCCESS)
        {
            // Stop on first failure; allocIndex marks how many pages succeeded.
            break;
        }
    }

    if(res == VK_SUCCESS)
    {
        // Register them in m_pDedicatedAllocations.
        {
            VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
            AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
            VMA_ASSERT(pDedicatedAllocations);
            for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
            {
                VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, pAllocations[allocIndex]);
            }
        }

        VMA_DEBUG_LOG(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
    }
    else
    {
        // Rollback: free all already created allocations, in reverse order.
        while(allocIndex--)
        {
            VmaAllocation currAlloc = pAllocations[allocIndex];
            VkDeviceMemory hMemory = currAlloc->GetMemory();

            /*
            There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
            before vkFreeMemory.

            if(currAlloc->GetMappedData() != VMA_NULL)
            {
                (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
            }
            */

            FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);

            currAlloc->SetUserData(this, VMA_NULL);
            currAlloc->Dtor();
            m_AllocationObjectAllocator.Free(currAlloc);
        }

        // Make sure the caller never sees dangling handles.
        memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
    }

    return res;
}
    14666 
    14667 VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
    14668  VkDeviceSize size,
    14669  VmaSuballocationType suballocType,
    14670  uint32_t memTypeIndex,
    14671  const VkMemoryAllocateInfo& allocInfo,
    14672  bool map,
    14673  bool isUserDataString,
    14674  void* pUserData,
    14675  VmaAllocation* pAllocation)
    14676 {
    14677  VkDeviceMemory hMemory = VK_NULL_HANDLE;
    14678  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
    14679  if(res < 0)
    14680  {
    14681  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    14682  return res;
    14683  }
    14684 
    14685  void* pMappedData = VMA_NULL;
    14686  if(map)
    14687  {
    14688  res = (*m_VulkanFunctions.vkMapMemory)(
    14689  m_hDevice,
    14690  hMemory,
    14691  0,
    14692  VK_WHOLE_SIZE,
    14693  0,
    14694  &pMappedData);
    14695  if(res < 0)
    14696  {
    14697  VMA_DEBUG_LOG(" vkMapMemory FAILED");
    14698  FreeVulkanMemory(memTypeIndex, size, hMemory);
    14699  return res;
    14700  }
    14701  }
    14702 
    14703  *pAllocation = m_AllocationObjectAllocator.Allocate();
    14704  (*pAllocation)->Ctor(m_CurrentFrameIndex.load(), isUserDataString);
    14705  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
    14706  (*pAllocation)->SetUserData(this, pUserData);
    14707  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    14708  {
    14709  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    14710  }
    14711 
    14712  return VK_SUCCESS;
    14713 }
    14714 
    14715 void VmaAllocator_T::GetBufferMemoryRequirements(
    14716  VkBuffer hBuffer,
    14717  VkMemoryRequirements& memReq,
    14718  bool& requiresDedicatedAllocation,
    14719  bool& prefersDedicatedAllocation) const
    14720 {
    14721 #if VMA_DEDICATED_ALLOCATION
    14722  if(m_UseKhrDedicatedAllocation)
    14723  {
    14724  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
    14725  memReqInfo.buffer = hBuffer;
    14726 
    14727  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    14728 
    14729  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    14730  memReq2.pNext = &memDedicatedReq;
    14731 
    14732  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    14733 
    14734  memReq = memReq2.memoryRequirements;
    14735  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    14736  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    14737  }
    14738  else
    14739 #endif // #if VMA_DEDICATED_ALLOCATION
    14740  {
    14741  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
    14742  requiresDedicatedAllocation = false;
    14743  prefersDedicatedAllocation = false;
    14744  }
    14745 }
    14746 
    14747 void VmaAllocator_T::GetImageMemoryRequirements(
    14748  VkImage hImage,
    14749  VkMemoryRequirements& memReq,
    14750  bool& requiresDedicatedAllocation,
    14751  bool& prefersDedicatedAllocation) const
    14752 {
    14753 #if VMA_DEDICATED_ALLOCATION
    14754  if(m_UseKhrDedicatedAllocation)
    14755  {
    14756  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
    14757  memReqInfo.image = hImage;
    14758 
    14759  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    14760 
    14761  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    14762  memReq2.pNext = &memDedicatedReq;
    14763 
    14764  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    14765 
    14766  memReq = memReq2.memoryRequirements;
    14767  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    14768  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    14769  }
    14770  else
    14771 #endif // #if VMA_DEDICATED_ALLOCATION
    14772  {
    14773  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
    14774  requiresDedicatedAllocation = false;
    14775  prefersDedicatedAllocation = false;
    14776  }
    14777 }
    14778 
    14779 VkResult VmaAllocator_T::AllocateMemory(
    14780  const VkMemoryRequirements& vkMemReq,
    14781  bool requiresDedicatedAllocation,
    14782  bool prefersDedicatedAllocation,
    14783  VkBuffer dedicatedBuffer,
    14784  VkImage dedicatedImage,
    14785  const VmaAllocationCreateInfo& createInfo,
    14786  VmaSuballocationType suballocType,
    14787  size_t allocationCount,
    14788  VmaAllocation* pAllocations)
    14789 {
    14790  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
    14791 
    14792  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
    14793 
    14794  if(vkMemReq.size == 0)
    14795  {
    14796  return VK_ERROR_VALIDATION_FAILED_EXT;
    14797  }
    14798  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
    14799  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    14800  {
    14801  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
    14802  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14803  }
    14804  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    14806  {
    14807  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
    14808  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14809  }
    14810  if(requiresDedicatedAllocation)
    14811  {
    14812  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    14813  {
    14814  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
    14815  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14816  }
    14817  if(createInfo.pool != VK_NULL_HANDLE)
    14818  {
    14819  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
    14820  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14821  }
    14822  }
    14823  if((createInfo.pool != VK_NULL_HANDLE) &&
    14824  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
    14825  {
    14826  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
    14827  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14828  }
    14829 
    14830  if(createInfo.pool != VK_NULL_HANDLE)
    14831  {
    14832  const VkDeviceSize alignmentForPool = VMA_MAX(
    14833  vkMemReq.alignment,
    14834  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
    14835 
    14836  VmaAllocationCreateInfo createInfoForPool = createInfo;
    14837  // If memory type is not HOST_VISIBLE, disable MAPPED.
    14838  if((createInfoForPool.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    14839  (m_MemProps.memoryTypes[createInfo.pool->m_BlockVector.GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    14840  {
    14841  createInfoForPool.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
    14842  }
    14843 
    14844  return createInfo.pool->m_BlockVector.Allocate(
    14845  m_CurrentFrameIndex.load(),
    14846  vkMemReq.size,
    14847  alignmentForPool,
    14848  createInfoForPool,
    14849  suballocType,
    14850  allocationCount,
    14851  pAllocations);
    14852  }
    14853  else
    14854  {
    14855  // Bit mask of memory Vulkan types acceptable for this allocation.
    14856  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
    14857  uint32_t memTypeIndex = UINT32_MAX;
    14858  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    14859  if(res == VK_SUCCESS)
    14860  {
    14861  VkDeviceSize alignmentForMemType = VMA_MAX(
    14862  vkMemReq.alignment,
    14863  GetMemoryTypeMinAlignment(memTypeIndex));
    14864 
    14865  res = AllocateMemoryOfType(
    14866  vkMemReq.size,
    14867  alignmentForMemType,
    14868  requiresDedicatedAllocation || prefersDedicatedAllocation,
    14869  dedicatedBuffer,
    14870  dedicatedImage,
    14871  createInfo,
    14872  memTypeIndex,
    14873  suballocType,
    14874  allocationCount,
    14875  pAllocations);
    14876  // Succeeded on first try.
    14877  if(res == VK_SUCCESS)
    14878  {
    14879  return res;
    14880  }
    14881  // Allocation from this memory type failed. Try other compatible memory types.
    14882  else
    14883  {
    14884  for(;;)
    14885  {
    14886  // Remove old memTypeIndex from list of possibilities.
    14887  memoryTypeBits &= ~(1u << memTypeIndex);
    14888  // Find alternative memTypeIndex.
    14889  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    14890  if(res == VK_SUCCESS)
    14891  {
    14892  alignmentForMemType = VMA_MAX(
    14893  vkMemReq.alignment,
    14894  GetMemoryTypeMinAlignment(memTypeIndex));
    14895 
    14896  res = AllocateMemoryOfType(
    14897  vkMemReq.size,
    14898  alignmentForMemType,
    14899  requiresDedicatedAllocation || prefersDedicatedAllocation,
    14900  dedicatedBuffer,
    14901  dedicatedImage,
    14902  createInfo,
    14903  memTypeIndex,
    14904  suballocType,
    14905  allocationCount,
    14906  pAllocations);
    14907  // Allocation from this alternative memory type succeeded.
    14908  if(res == VK_SUCCESS)
    14909  {
    14910  return res;
    14911  }
    14912  // else: Allocation from this memory type failed. Try next one - next loop iteration.
    14913  }
    14914  // No other matching memory type index could be found.
    14915  else
    14916  {
    14917  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
    14918  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14919  }
    14920  }
    14921  }
    14922  }
    14923  // Can't find any single memory type maching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
    14924  else
    14925  return res;
    14926  }
    14927 }
    14928 
/*
Frees `allocationCount` allocations. Null handles in the array are skipped.
Backing memory is returned to the owning block vector (block allocations) or
freed directly (dedicated allocations); lost allocations have no backing
memory, so only the VmaAllocation object itself is destroyed for them.
*/
void VmaAllocator_T::FreeMemory(
    size_t allocationCount,
    const VmaAllocation* pAllocations)
{
    VMA_ASSERT(pAllocations);

    // Iterate in reverse order of the input array.
    for(size_t allocIndex = allocationCount; allocIndex--; )
    {
        VmaAllocation allocation = pAllocations[allocIndex];

        if(allocation != VK_NULL_HANDLE)
        {
            // TouchAllocation returns false when the allocation is already
            // lost - then there is no backing memory to release.
            if(TouchAllocation(allocation))
            {
                if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
                {
                    // Debug feature: overwrite freed memory with a "destroyed"
                    // pattern to help catch use-after-free.
                    FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
                }

                switch(allocation->GetType())
                {
                case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
                    {
                        // Return the suballocation to its owning block vector:
                        // a custom pool's, or the default one of its memory type.
                        VmaBlockVector* pBlockVector = VMA_NULL;
                        VmaPool hPool = allocation->GetBlock()->GetParentPool();
                        if(hPool != VK_NULL_HANDLE)
                        {
                            pBlockVector = &hPool->m_BlockVector;
                        }
                        else
                        {
                            const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
                            pBlockVector = m_pBlockVectors[memTypeIndex];
                        }
                        pBlockVector->Free(allocation);
                    }
                    break;
                case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
                    FreeDedicatedMemory(allocation);
                    break;
                default:
                    VMA_ASSERT(0);
                }
            }

            // Destroy the VmaAllocation object itself.
            allocation->SetUserData(this, VMA_NULL);
            allocation->Dtor();
            m_AllocationObjectAllocator.Free(allocation);
        }
    }
}
    14980 
    14981 VkResult VmaAllocator_T::ResizeAllocation(
    14982  const VmaAllocation alloc,
    14983  VkDeviceSize newSize)
    14984 {
    14985  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
    14986  {
    14987  return VK_ERROR_VALIDATION_FAILED_EXT;
    14988  }
    14989  if(newSize == alloc->GetSize())
    14990  {
    14991  return VK_SUCCESS;
    14992  }
    14993 
    14994  switch(alloc->GetType())
    14995  {
    14996  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    14997  return VK_ERROR_FEATURE_NOT_PRESENT;
    14998  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    14999  if(alloc->GetBlock()->m_pMetadata->ResizeAllocation(alloc, newSize))
    15000  {
    15001  alloc->ChangeSize(newSize);
    15002  VMA_HEAVY_ASSERT(alloc->GetBlock()->m_pMetadata->Validate());
    15003  return VK_SUCCESS;
    15004  }
    15005  else
    15006  {
    15007  return VK_ERROR_OUT_OF_POOL_MEMORY;
    15008  }
    15009  default:
    15010  VMA_ASSERT(0);
    15011  return VK_ERROR_VALIDATION_FAILED_EXT;
    15012  }
    15013 }
    15014 
    15015 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
    15016 {
    15017  // Initialize.
    15018  InitStatInfo(pStats->total);
    15019  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
    15020  InitStatInfo(pStats->memoryType[i]);
    15021  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    15022  InitStatInfo(pStats->memoryHeap[i]);
    15023 
    15024  // Process default pools.
    15025  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    15026  {
    15027  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    15028  VMA_ASSERT(pBlockVector);
    15029  pBlockVector->AddStats(pStats);
    15030  }
    15031 
    15032  // Process custom pools.
    15033  {
    15034  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
    15035  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    15036  {
    15037  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
    15038  }
    15039  }
    15040 
    15041  // Process dedicated allocations.
    15042  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    15043  {
    15044  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    15045  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    15046  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
    15047  VMA_ASSERT(pDedicatedAllocVector);
    15048  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
    15049  {
    15050  VmaStatInfo allocationStatInfo;
    15051  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
    15052  VmaAddStatInfo(pStats->total, allocationStatInfo);
    15053  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    15054  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    15055  }
    15056  }
    15057 
    15058  // Postprocess.
    15059  VmaPostprocessCalcStatInfo(pStats->total);
    15060  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
    15061  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
    15062  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
    15063  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
    15064 }
    15065 
// PCI vendor ID of AMD (4098 == 0x1002).
// NOTE(review): not referenced within this chunk - presumably used elsewhere
// for vendor-specific behavior; confirm against the rest of the file.
static const uint32_t VMA_VENDOR_ID_AMD = 4098;
    15067 
    15068 VkResult VmaAllocator_T::DefragmentationBegin(
    15069  const VmaDefragmentationInfo2& info,
    15070  VmaDefragmentationStats* pStats,
    15071  VmaDefragmentationContext* pContext)
    15072 {
    15073  if(info.pAllocationsChanged != VMA_NULL)
    15074  {
    15075  memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32));
    15076  }
    15077 
    15078  *pContext = vma_new(this, VmaDefragmentationContext_T)(
    15079  this, m_CurrentFrameIndex.load(), info.flags, pStats);
    15080 
    15081  (*pContext)->AddPools(info.poolCount, info.pPools);
    15082  (*pContext)->AddAllocations(
    15084 
    15085  VkResult res = (*pContext)->Defragment(
    15088  info.commandBuffer, pStats);
    15089 
    15090  if(res != VK_NOT_READY)
    15091  {
    15092  vma_delete(this, *pContext);
    15093  *pContext = VMA_NULL;
    15094  }
    15095 
    15096  return res;
    15097 }
    15098 
/*
Ends a defragmentation operation by destroying the context created in
DefragmentationBegin. Always succeeds.
*/
VkResult VmaAllocator_T::DefragmentationEnd(
    VmaDefragmentationContext context)
{
    vma_delete(this, context);
    return VK_SUCCESS;
}
    15105 
/*
Fills *pAllocationInfo with the current parameters of hAllocation.
For allocations that can become lost this also "touches" the allocation,
advancing its last-use frame index to the current frame with a lock-free
compare-exchange loop; a lost allocation is reported with null memory/offset
but retains its size and user data.
*/
void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
{
    if(hAllocation->CanBecomeLost())
    {
        /*
        Warning: This is a carefully designed algorithm.
        Do not modify unless you really know what you're doing :)
        */
        const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                // Allocation became lost: no backing memory to report.
                pAllocationInfo->memoryType = UINT32_MAX;
                pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
                pAllocationInfo->offset = 0;
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                // Touched in the current frame: report real parameters.
                // pMappedData stays null - MAPPED together with CAN_BECOME_LOST
                // is rejected in AllocateMemory, so such allocations are never mapped.
                pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
                pAllocationInfo->deviceMemory = hAllocation->GetMemory();
                pAllocationInfo->offset = hAllocation->GetOffset();
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else // Last use time earlier than current time.
            {
                // Try to advance last-use to the current frame. If the CAS fails,
                // another thread changed it concurrently - loop and re-evaluate.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // In statistics builds, also bump the last-use frame index for
        // non-lost-capable allocations so usage is reflected in stats.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            // A non-lost-capable allocation can never be in the lost state.
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
        pAllocationInfo->deviceMemory = hAllocation->GetMemory();
        pAllocationInfo->offset = hAllocation->GetOffset();
        pAllocationInfo->size = hAllocation->GetSize();
        pAllocationInfo->pMappedData = hAllocation->GetMappedData();
        pAllocationInfo->pUserData = hAllocation->GetUserData();
    }
}
    15177 
/*
"Touches" the allocation: advances its last-use frame index to the current
frame. Returns false if the allocation is lost, true otherwise.
Uses the same compare-exchange loop as GetAllocationInfo.
*/
bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
{
    // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
    if(hAllocation->CanBecomeLost())
    {
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                // Already lost - cannot be touched anymore.
                return false;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                // Already touched in the current frame.
                return true;
            }
            else // Last use time earlier than current time.
            {
                // Try to advance last-use to the current frame; on CAS failure
                // another thread interfered - loop and re-evaluate.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // In statistics builds, also bump the last-use frame index for
        // non-lost-capable allocations so usage is reflected in stats.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            // A non-lost-capable allocation can never be in the lost state.
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        // Non-lost-capable allocations are always valid.
        return true;
    }
}
    15229 
    15230 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
    15231 {
    15232  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
    15233 
    15234  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
    15235 
    15236  if(newCreateInfo.maxBlockCount == 0)
    15237  {
    15238  newCreateInfo.maxBlockCount = SIZE_MAX;
    15239  }
    15240  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
    15241  {
    15242  return VK_ERROR_INITIALIZATION_FAILED;
    15243  }
    15244 
    15245  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
    15246 
    15247  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
    15248 
    15249  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
    15250  if(res != VK_SUCCESS)
    15251  {
    15252  vma_delete(this, *pPool);
    15253  *pPool = VMA_NULL;
    15254  return res;
    15255  }
    15256 
    15257  // Add to m_Pools.
    15258  {
    15259  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
    15260  (*pPool)->SetId(m_NextPoolId++);
    15261  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
    15262  }
    15263 
    15264  return VK_SUCCESS;
    15265 }
    15266 
    15267 void VmaAllocator_T::DestroyPool(VmaPool pool)
    15268 {
    15269  // Remove from m_Pools.
    15270  {
    15271  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
    15272  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
    15273  VMA_ASSERT(success && "Pool not found in Allocator.");
    15274  }
    15275 
    15276  vma_delete(this, pool);
    15277 }
    15278 
// Fills *pPoolStats with statistics of a single custom pool by delegating
// to the pool's block vector.
void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
{
    pool->m_BlockVector.GetPoolStats(pPoolStats);
}
    15283 
// Atomically publishes the new frame index. It is read by allocation paths
// and by the lost-allocation touch logic (see TouchAllocation).
void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
{
    m_CurrentFrameIndex.store(frameIndex);
}
    15288 
// Delegates to the pool's block vector to mark its allocations as lost as of
// the current frame index. The number of allocations affected is optionally
// returned via pLostAllocationCount.
void VmaAllocator_T::MakePoolAllocationsLost(
    VmaPool hPool,
    size_t* pLostAllocationCount)
{
    hPool->m_BlockVector.MakePoolAllocationsLost(
        m_CurrentFrameIndex.load(),
        pLostAllocationCount);
}
    15297 
// Runs the corruption check on a single custom pool by delegating to its
// block vector; returns whatever CheckCorruption reports.
VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
{
    return hPool->m_BlockVector.CheckCorruption();
}
    15302 
    15303 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
    15304 {
    15305  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
    15306 
    15307  // Process default pools.
    15308  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    15309  {
    15310  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
    15311  {
    15312  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    15313  VMA_ASSERT(pBlockVector);
    15314  VkResult localRes = pBlockVector->CheckCorruption();
    15315  switch(localRes)
    15316  {
    15317  case VK_ERROR_FEATURE_NOT_PRESENT:
    15318  break;
    15319  case VK_SUCCESS:
    15320  finalRes = VK_SUCCESS;
    15321  break;
    15322  default:
    15323  return localRes;
    15324  }
    15325  }
    15326  }
    15327 
    15328  // Process custom pools.
    15329  {
    15330  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
    15331  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    15332  {
    15333  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
    15334  {
    15335  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
    15336  switch(localRes)
    15337  {
    15338  case VK_ERROR_FEATURE_NOT_PRESENT:
    15339  break;
    15340  case VK_SUCCESS:
    15341  finalRes = VK_SUCCESS;
    15342  break;
    15343  default:
    15344  return localRes;
    15345  }
    15346  }
    15347  }
    15348  }
    15349 
    15350  return finalRes;
    15351 }
    15352 
// Creates a VmaAllocation object that starts out in the lost state
// (frame index VMA_FRAME_INDEX_LOST, initialized via InitLost).
void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
{
    *pAllocation = m_AllocationObjectAllocator.Allocate();
    (*pAllocation)->Ctor(VMA_FRAME_INDEX_LOST, false);
    (*pAllocation)->InitLost();
}
    15359 
// Calls vkAllocateMemory, honoring the optional per-heap size budget stored in
// m_HeapSizeLimit and invoking the user's device-memory allocate callback on
// success. Returns the result of vkAllocateMemory, or
// VK_ERROR_OUT_OF_DEVICE_MEMORY when the allocation would exceed the heap limit.
VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
{
    const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);

    VkResult res;
    // VK_WHOLE_SIZE is the sentinel meaning "no limit configured for this heap".
    if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    {
        // The check-and-decrement of the remaining budget must be atomic with
        // respect to concurrent allocations, so the mutex is held across the
        // Vulkan call as well.
        VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
        if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
        {
            res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
            if(res == VK_SUCCESS)
            {
                // Shrink the remaining budget only when the allocation succeeded.
                m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
            }
        }
        else
        {
            // Allocation would exceed the user-configured heap size limit.
            res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
        }
    }
    else
    {
        // No limit on this heap - allocate directly, no lock needed.
        res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    }

    // Notify the user's callback about the new device memory block.
    if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
    {
        (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
    }

    return res;
}
    15393 
    15394 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
    15395 {
    15396  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
    15397  {
    15398  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
    15399  }
    15400 
    15401  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
    15402 
    15403  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
    15404  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    15405  {
    15406  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    15407  m_HeapSizeLimit[heapIndex] += size;
    15408  }
    15409 }
    15410 
    15411 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
    15412 {
    15413  if(hAllocation->CanBecomeLost())
    15414  {
    15415  return VK_ERROR_MEMORY_MAP_FAILED;
    15416  }
    15417 
    15418  switch(hAllocation->GetType())
    15419  {
    15420  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    15421  {
    15422  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    15423  char *pBytes = VMA_NULL;
    15424  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
    15425  if(res == VK_SUCCESS)
    15426  {
    15427  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
    15428  hAllocation->BlockAllocMap();
    15429  }
    15430  return res;
    15431  }
    15432  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    15433  return hAllocation->DedicatedAllocMap(this, ppData);
    15434  default:
    15435  VMA_ASSERT(0);
    15436  return VK_ERROR_MEMORY_MAP_FAILED;
    15437  }
    15438 }
    15439 
    15440 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
    15441 {
    15442  switch(hAllocation->GetType())
    15443  {
    15444  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    15445  {
    15446  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    15447  hAllocation->BlockAllocUnmap();
    15448  pBlock->Unmap(this, 1);
    15449  }
    15450  break;
    15451  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    15452  hAllocation->DedicatedAllocUnmap(this);
    15453  break;
    15454  default:
    15455  VMA_ASSERT(0);
    15456  }
    15457 }
    15458 
    15459 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
    15460 {
    15461  VkResult res = VK_SUCCESS;
    15462  switch(hAllocation->GetType())
    15463  {
    15464  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    15465  res = GetVulkanFunctions().vkBindBufferMemory(
    15466  m_hDevice,
    15467  hBuffer,
    15468  hAllocation->GetMemory(),
    15469  0); //memoryOffset
    15470  break;
    15471  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    15472  {
    15473  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    15474  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
    15475  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
    15476  break;
    15477  }
    15478  default:
    15479  VMA_ASSERT(0);
    15480  }
    15481  return res;
    15482 }
    15483 
    15484 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
    15485 {
    15486  VkResult res = VK_SUCCESS;
    15487  switch(hAllocation->GetType())
    15488  {
    15489  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    15490  res = GetVulkanFunctions().vkBindImageMemory(
    15491  m_hDevice,
    15492  hImage,
    15493  hAllocation->GetMemory(),
    15494  0); //memoryOffset
    15495  break;
    15496  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    15497  {
    15498  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    15499  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
    15500  res = pBlock->BindImageMemory(this, hAllocation, hImage);
    15501  break;
    15502  }
    15503  default:
    15504  VMA_ASSERT(0);
    15505  }
    15506  return res;
    15507 }
    15508 
// Flushes or invalidates (depending on `op`) the given range of the
// allocation's memory, but only when the memory type is non-coherent -
// coherent memory needs no explicit flush/invalidate, so the call is a no-op.
// `size` may be VK_WHOLE_SIZE. The range is expanded to multiples of
// nonCoherentAtomSize, as the Vulkan spec requires for
// vkFlushMappedMemoryRanges / vkInvalidateMappedMemoryRanges.
void VmaAllocator_T::FlushOrInvalidateAllocation(
    VmaAllocation hAllocation,
    VkDeviceSize offset, VkDeviceSize size,
    VMA_CACHE_OPERATION op)
{
    const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
    if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
    {
        const VkDeviceSize allocationSize = hAllocation->GetSize();
        VMA_ASSERT(offset <= allocationSize);

        const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;

        VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
        memRange.memory = hAllocation->GetMemory();

        switch(hAllocation->GetType())
        {
        case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
            // Align the start of the range down to an atom boundary.
            memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                memRange.size = allocationSize - memRange.offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
                // Align the end up, but never past the end of the allocation.
                memRange.size = VMA_MIN(
                    VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
                    allocationSize - memRange.offset);
            }
            break;

        case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
        {
            // 1. Still within this allocation.
            // Compute the aligned range relative to the allocation first.
            memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                size = allocationSize - offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
            }
            memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);

            // 2. Adjust to whole block.
            // Translate into block coordinates; the allocation's offset inside
            // the block is expected to already be atom-aligned, so the sum
            // stays aligned.
            const VkDeviceSize allocationOffset = hAllocation->GetOffset();
            VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
            const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
            memRange.offset += allocationOffset;
            // Clamp so the range never extends past the end of the block's
            // VkDeviceMemory.
            memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);

            break;
        }

        default:
            VMA_ASSERT(0);
        }

        // Issue the actual cache-maintenance call.
        switch(op)
        {
        case VMA_CACHE_FLUSH:
            (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        case VMA_CACHE_INVALIDATE:
            (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        default:
            VMA_ASSERT(0);
        }
    }
    // else: Just ignore this call.
}
    15584 
// Destroys a dedicated (non-block) allocation: unregisters it from the
// per-memory-type dedicated-allocation list, then frees its VkDeviceMemory.
// The caller is responsible for destroying the VmaAllocation object itself.
void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
{
    VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);

    const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    {
        // Scope the write lock to just the removal from the sorted vector.
        VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocations);
        // The allocation must have been registered at creation time.
        bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
        VMA_ASSERT(success);
    }

    VkDeviceMemory hMemory = allocation->GetMemory();

    /*
    There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
    before vkFreeMemory.

    if(allocation->GetMappedData() != VMA_NULL)
    {
        (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    }
    */

    // Returns the size to the heap budget and fires the user's free callback.
    FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);

    VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
}
    15614 
    15615 uint32_t VmaAllocator_T::CalculateGpuDefragmentationMemoryTypeBits() const
    15616 {
    15617  VkBufferCreateInfo dummyBufCreateInfo;
    15618  VmaFillGpuDefragmentationBufferCreateInfo(dummyBufCreateInfo);
    15619 
    15620  uint32_t memoryTypeBits = 0;
    15621 
    15622  // Create buffer.
    15623  VkBuffer buf = VK_NULL_HANDLE;
    15624  VkResult res = (*GetVulkanFunctions().vkCreateBuffer)(
    15625  m_hDevice, &dummyBufCreateInfo, GetAllocationCallbacks(), &buf);
    15626  if(res == VK_SUCCESS)
    15627  {
    15628  // Query for supported memory types.
    15629  VkMemoryRequirements memReq;
    15630  (*GetVulkanFunctions().vkGetBufferMemoryRequirements)(m_hDevice, buf, &memReq);
    15631  memoryTypeBits = memReq.memoryTypeBits;
    15632 
    15633  // Destroy buffer.
    15634  (*GetVulkanFunctions().vkDestroyBuffer)(m_hDevice, buf, GetAllocationCallbacks());
    15635  }
    15636 
    15637  return memoryTypeBits;
    15638 }
    15639 
    15640 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
    15641 {
    15642  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
    15643  !hAllocation->CanBecomeLost() &&
    15644  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    15645  {
    15646  void* pData = VMA_NULL;
    15647  VkResult res = Map(hAllocation, &pData);
    15648  if(res == VK_SUCCESS)
    15649  {
    15650  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
    15651  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
    15652  Unmap(hAllocation);
    15653  }
    15654  else
    15655  {
    15656  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
    15657  }
    15658  }
    15659 }
    15660 
    15661 uint32_t VmaAllocator_T::GetGpuDefragmentationMemoryTypeBits()
    15662 {
    15663  uint32_t memoryTypeBits = m_GpuDefragmentationMemoryTypeBits.load();
    15664  if(memoryTypeBits == UINT32_MAX)
    15665  {
    15666  memoryTypeBits = CalculateGpuDefragmentationMemoryTypeBits();
    15667  m_GpuDefragmentationMemoryTypeBits.store(memoryTypeBits);
    15668  }
    15669  return memoryTypeBits;
    15670 }
    15671 
    15672 #if VMA_STATS_STRING_ENABLED
    15673 
// Writes the detailed JSON map of all memory owned by this allocator:
// dedicated allocations per memory type, default block vectors per memory
// type, and custom pools. Expects `json` to be inside an open JSON object.
void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
{
    bool dedicatedAllocationsStarted = false;
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocVector);
        if(pDedicatedAllocVector->empty() == false)
        {
            // Open the "DedicatedAllocations" object lazily, only when at
            // least one memory type actually has dedicated allocations.
            if(dedicatedAllocationsStarted == false)
            {
                dedicatedAllocationsStarted = true;
                json.WriteString("DedicatedAllocations");
                json.BeginObject();
            }

            json.BeginString("Type ");
            json.ContinueString(memTypeIndex);
            json.EndString();

            json.BeginArray();

            for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
            {
                json.BeginObject(true);
                const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
                hAlloc->PrintParameters(json);
                json.EndObject();
            }

            json.EndArray();
        }
    }
    if(dedicatedAllocationsStarted)
    {
        json.EndObject();
    }

    {
        bool allocationsStarted = false;
        for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
        {
            if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
            {
                // Same lazy-open pattern for the "DefaultPools" object.
                if(allocationsStarted == false)
                {
                    allocationsStarted = true;
                    json.WriteString("DefaultPools");
                    json.BeginObject();
                }

                json.BeginString("Type ");
                json.ContinueString(memTypeIndex);
                json.EndString();

                m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
            }
        }
        if(allocationsStarted)
        {
            json.EndObject();
        }
    }

    // Custom pools
    {
        VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
        const size_t poolCount = m_Pools.size();
        if(poolCount > 0)
        {
            json.WriteString("Pools");
            json.BeginObject();
            for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
            {
                // The pool's numeric ID serves as the JSON key.
                json.BeginString();
                json.ContinueString(m_Pools[poolIndex]->GetId());
                json.EndString();

                m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
            }
            json.EndObject();
        }
    }
}
    15759 
    15760 #endif // #if VMA_STATS_STRING_ENABLED
    15761 
    15763 // Public interface
    15764 
    15765 VkResult vmaCreateAllocator(
    15766  const VmaAllocatorCreateInfo* pCreateInfo,
    15767  VmaAllocator* pAllocator)
    15768 {
    15769  VMA_ASSERT(pCreateInfo && pAllocator);
    15770  VMA_DEBUG_LOG("vmaCreateAllocator");
    15771  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
    15772  return (*pAllocator)->Init(pCreateInfo);
    15773 }
    15774 
    15775 void vmaDestroyAllocator(
    15776  VmaAllocator allocator)
    15777 {
    15778  if(allocator != VK_NULL_HANDLE)
    15779  {
    15780  VMA_DEBUG_LOG("vmaDestroyAllocator");
    15781  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
    15782  vma_delete(&allocationCallbacks, allocator);
    15783  }
    15784 }
    15785 
    15787  VmaAllocator allocator,
    15788  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
    15789 {
    15790  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
    15791  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
    15792 }
    15793 
    15795  VmaAllocator allocator,
    15796  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
    15797 {
    15798  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
    15799  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
    15800 }
    15801 
    15803  VmaAllocator allocator,
    15804  uint32_t memoryTypeIndex,
    15805  VkMemoryPropertyFlags* pFlags)
    15806 {
    15807  VMA_ASSERT(allocator && pFlags);
    15808  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
    15809  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
    15810 }
    15811 
    15813  VmaAllocator allocator,
    15814  uint32_t frameIndex)
    15815 {
    15816  VMA_ASSERT(allocator);
    15817  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
    15818 
    15819  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    15820 
    15821  allocator->SetCurrentFrameIndex(frameIndex);
    15822 }
    15823 
    15824 void vmaCalculateStats(
    15825  VmaAllocator allocator,
    15826  VmaStats* pStats)
    15827 {
    15828  VMA_ASSERT(allocator && pStats);
    15829  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    15830  allocator->CalculateStats(pStats);
    15831 }
    15832 
    15833 #if VMA_STATS_STRING_ENABLED
    15834 
// Builds a null-terminated JSON statistics string into *ppStatsString,
// allocated with the allocator's CPU allocation callbacks. The caller must
// release it with vmaFreeStatsString(). When detailedMap is VK_TRUE, the full
// per-allocation map is appended as well.
void vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap)
{
    VMA_ASSERT(allocator && ppStatsString);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VmaStringBuilder sb(allocator);
    {
        // Scope the writer so it is finished before the string is copied out.
        VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
        json.BeginObject();

        VmaStats stats;
        allocator->CalculateStats(&stats);

        // Grand total over all heaps and types.
        json.WriteString("Total");
        VmaPrintStatInfo(json, stats.total);

        // One object per memory heap, with its memory types nested inside.
        for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
        {
            json.BeginString("Heap ");
            json.ContinueString(heapIndex);
            json.EndString();
            json.BeginObject();

            json.WriteString("Size");
            json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);

            json.WriteString("Flags");
            json.BeginArray(true);
            if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
            {
                json.WriteString("DEVICE_LOCAL");
            }
            json.EndArray();

            // Per-heap stats are emitted only when the heap is in use.
            if(stats.memoryHeap[heapIndex].blockCount > 0)
            {
                json.WriteString("Stats");
                VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
            }

            for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
            {
                // Emit each memory type under the heap it belongs to.
                if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
                {
                    json.BeginString("Type ");
                    json.ContinueString(typeIndex);
                    json.EndString();

                    json.BeginObject();

                    // Decode property flags into human-readable names.
                    json.WriteString("Flags");
                    json.BeginArray(true);
                    VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
                    if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
                    {
                        json.WriteString("DEVICE_LOCAL");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
                    {
                        json.WriteString("HOST_VISIBLE");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
                    {
                        json.WriteString("HOST_COHERENT");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
                    {
                        json.WriteString("HOST_CACHED");
                    }
                    if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
                    {
                        json.WriteString("LAZILY_ALLOCATED");
                    }
                    json.EndArray();

                    if(stats.memoryType[typeIndex].blockCount > 0)
                    {
                        json.WriteString("Stats");
                        VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
                    }

                    json.EndObject();
                }
            }

            json.EndObject();
        }
        if(detailedMap == VK_TRUE)
        {
            allocator->PrintDetailedMap(json);
        }

        json.EndObject();
    }

    // Copy the builder's contents into a null-terminated heap string owned by
    // the caller.
    const size_t len = sb.GetLength();
    char* const pChars = vma_new_array(allocator, char, len + 1);
    if(len > 0)
    {
        memcpy(pChars, sb.GetData(), len);
    }
    pChars[len] = '\0';
    *ppStatsString = pChars;
}
    15942 
    15943 void vmaFreeStatsString(
    15944  VmaAllocator allocator,
    15945  char* pStatsString)
    15946 {
    15947  if(pStatsString != VMA_NULL)
    15948  {
    15949  VMA_ASSERT(allocator);
    15950  size_t len = strlen(pStatsString);
    15951  vma_delete_array(allocator, pStatsString, len + 1);
    15952  }
    15953 }
    15954 
    15955 #endif // #if VMA_STATS_STRING_ENABLED
    15956 
    15957 /*
    15958 This function is not protected by any mutex because it just reads immutable data.
    15959 */
    15960 VkResult vmaFindMemoryTypeIndex(
    15961  VmaAllocator allocator,
    15962  uint32_t memoryTypeBits,
    15963  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    15964  uint32_t* pMemoryTypeIndex)
    15965 {
    15966  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    15967  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    15968  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    15969 
    15970  if(pAllocationCreateInfo->memoryTypeBits != 0)
    15971  {
    15972  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
    15973  }
    15974 
    15975  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
    15976  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
    15977 
    15978  // Convert usage to requiredFlags and preferredFlags.
    15979  switch(pAllocationCreateInfo->usage)
    15980  {
    15982  break;
    15984  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    15985  {
    15986  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    15987  }
    15988  break;
    15990  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    15991  break;
    15993  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    15994  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    15995  {
    15996  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    15997  }
    15998  break;
    16000  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    16001  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
    16002  break;
    16003  default:
    16004  break;
    16005  }
    16006 
    16007  *pMemoryTypeIndex = UINT32_MAX;
    16008  uint32_t minCost = UINT32_MAX;
    16009  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
    16010  memTypeIndex < allocator->GetMemoryTypeCount();
    16011  ++memTypeIndex, memTypeBit <<= 1)
    16012  {
    16013  // This memory type is acceptable according to memoryTypeBits bitmask.
    16014  if((memTypeBit & memoryTypeBits) != 0)
    16015  {
    16016  const VkMemoryPropertyFlags currFlags =
    16017  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
    16018  // This memory type contains requiredFlags.
    16019  if((requiredFlags & ~currFlags) == 0)
    16020  {
    16021  // Calculate cost as number of bits from preferredFlags not present in this memory type.
    16022  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
    16023  // Remember memory type with lowest cost.
    16024  if(currCost < minCost)
    16025  {
    16026  *pMemoryTypeIndex = memTypeIndex;
    16027  if(currCost == 0)
    16028  {
    16029  return VK_SUCCESS;
    16030  }
    16031  minCost = currCost;
    16032  }
    16033  }
    16034  }
    16035  }
    16036  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
    16037 }
    16038 
    16040  VmaAllocator allocator,
    16041  const VkBufferCreateInfo* pBufferCreateInfo,
    16042  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    16043  uint32_t* pMemoryTypeIndex)
    16044 {
    16045  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    16046  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
    16047  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    16048  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    16049 
    16050  const VkDevice hDev = allocator->m_hDevice;
    16051  VkBuffer hBuffer = VK_NULL_HANDLE;
    16052  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
    16053  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
    16054  if(res == VK_SUCCESS)
    16055  {
    16056  VkMemoryRequirements memReq = {};
    16057  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
    16058  hDev, hBuffer, &memReq);
    16059 
    16060  res = vmaFindMemoryTypeIndex(
    16061  allocator,
    16062  memReq.memoryTypeBits,
    16063  pAllocationCreateInfo,
    16064  pMemoryTypeIndex);
    16065 
    16066  allocator->GetVulkanFunctions().vkDestroyBuffer(
    16067  hDev, hBuffer, allocator->GetAllocationCallbacks());
    16068  }
    16069  return res;
    16070 }
    16071 
    16073  VmaAllocator allocator,
    16074  const VkImageCreateInfo* pImageCreateInfo,
    16075  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    16076  uint32_t* pMemoryTypeIndex)
    16077 {
    16078  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    16079  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
    16080  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    16081  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    16082 
    16083  const VkDevice hDev = allocator->m_hDevice;
    16084  VkImage hImage = VK_NULL_HANDLE;
    16085  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
    16086  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
    16087  if(res == VK_SUCCESS)
    16088  {
    16089  VkMemoryRequirements memReq = {};
    16090  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
    16091  hDev, hImage, &memReq);
    16092 
    16093  res = vmaFindMemoryTypeIndex(
    16094  allocator,
    16095  memReq.memoryTypeBits,
    16096  pAllocationCreateInfo,
    16097  pMemoryTypeIndex);
    16098 
    16099  allocator->GetVulkanFunctions().vkDestroyImage(
    16100  hDev, hImage, allocator->GetAllocationCallbacks());
    16101  }
    16102  return res;
    16103 }
    16104 
    16105 VkResult vmaCreatePool(
    16106  VmaAllocator allocator,
    16107  const VmaPoolCreateInfo* pCreateInfo,
    16108  VmaPool* pPool)
    16109 {
    16110  VMA_ASSERT(allocator && pCreateInfo && pPool);
    16111 
    16112  VMA_DEBUG_LOG("vmaCreatePool");
    16113 
    16114  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16115 
    16116  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
    16117 
    16118 #if VMA_RECORDING_ENABLED
    16119  if(allocator->GetRecorder() != VMA_NULL)
    16120  {
    16121  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
    16122  }
    16123 #endif
    16124 
    16125  return res;
    16126 }
    16127 
    16128 void vmaDestroyPool(
    16129  VmaAllocator allocator,
    16130  VmaPool pool)
    16131 {
    16132  VMA_ASSERT(allocator);
    16133 
    16134  if(pool == VK_NULL_HANDLE)
    16135  {
    16136  return;
    16137  }
    16138 
    16139  VMA_DEBUG_LOG("vmaDestroyPool");
    16140 
    16141  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16142 
    16143 #if VMA_RECORDING_ENABLED
    16144  if(allocator->GetRecorder() != VMA_NULL)
    16145  {
    16146  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
    16147  }
    16148 #endif
    16149 
    16150  allocator->DestroyPool(pool);
    16151 }
    16152 
    16153 void vmaGetPoolStats(
    16154  VmaAllocator allocator,
    16155  VmaPool pool,
    16156  VmaPoolStats* pPoolStats)
    16157 {
    16158  VMA_ASSERT(allocator && pool && pPoolStats);
    16159 
    16160  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16161 
    16162  allocator->GetPoolStats(pool, pPoolStats);
    16163 }
    16164 
    16166  VmaAllocator allocator,
    16167  VmaPool pool,
    16168  size_t* pLostAllocationCount)
    16169 {
    16170  VMA_ASSERT(allocator && pool);
    16171 
    16172  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16173 
    16174 #if VMA_RECORDING_ENABLED
    16175  if(allocator->GetRecorder() != VMA_NULL)
    16176  {
    16177  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
    16178  }
    16179 #endif
    16180 
    16181  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
    16182 }
    16183 
    16184 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
    16185 {
    16186  VMA_ASSERT(allocator && pool);
    16187 
    16188  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16189 
    16190  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
    16191 
    16192  return allocator->CheckPoolCorruption(pool);
    16193 }
    16194 
    16195 VkResult vmaAllocateMemory(
    16196  VmaAllocator allocator,
    16197  const VkMemoryRequirements* pVkMemoryRequirements,
    16198  const VmaAllocationCreateInfo* pCreateInfo,
    16199  VmaAllocation* pAllocation,
    16200  VmaAllocationInfo* pAllocationInfo)
    16201 {
    16202  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
    16203 
    16204  VMA_DEBUG_LOG("vmaAllocateMemory");
    16205 
    16206  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16207 
    16208  VkResult result = allocator->AllocateMemory(
    16209  *pVkMemoryRequirements,
    16210  false, // requiresDedicatedAllocation
    16211  false, // prefersDedicatedAllocation
    16212  VK_NULL_HANDLE, // dedicatedBuffer
    16213  VK_NULL_HANDLE, // dedicatedImage
    16214  *pCreateInfo,
    16215  VMA_SUBALLOCATION_TYPE_UNKNOWN,
    16216  1, // allocationCount
    16217  pAllocation);
    16218 
    16219 #if VMA_RECORDING_ENABLED
    16220  if(allocator->GetRecorder() != VMA_NULL)
    16221  {
    16222  allocator->GetRecorder()->RecordAllocateMemory(
    16223  allocator->GetCurrentFrameIndex(),
    16224  *pVkMemoryRequirements,
    16225  *pCreateInfo,
    16226  *pAllocation);
    16227  }
    16228 #endif
    16229 
    16230  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
    16231  {
    16232  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    16233  }
    16234 
    16235  return result;
    16236 }
    16237 
    16238 VkResult vmaAllocateMemoryPages(
    16239  VmaAllocator allocator,
    16240  const VkMemoryRequirements* pVkMemoryRequirements,
    16241  const VmaAllocationCreateInfo* pCreateInfo,
    16242  size_t allocationCount,
    16243  VmaAllocation* pAllocations,
    16244  VmaAllocationInfo* pAllocationInfo)
    16245 {
    16246  if(allocationCount == 0)
    16247  {
    16248  return VK_SUCCESS;
    16249  }
    16250 
    16251  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);
    16252 
    16253  VMA_DEBUG_LOG("vmaAllocateMemoryPages");
    16254 
    16255  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16256 
    16257  VkResult result = allocator->AllocateMemory(
    16258  *pVkMemoryRequirements,
    16259  false, // requiresDedicatedAllocation
    16260  false, // prefersDedicatedAllocation
    16261  VK_NULL_HANDLE, // dedicatedBuffer
    16262  VK_NULL_HANDLE, // dedicatedImage
    16263  *pCreateInfo,
    16264  VMA_SUBALLOCATION_TYPE_UNKNOWN,
    16265  allocationCount,
    16266  pAllocations);
    16267 
    16268 #if VMA_RECORDING_ENABLED
    16269  if(allocator->GetRecorder() != VMA_NULL)
    16270  {
    16271  allocator->GetRecorder()->RecordAllocateMemoryPages(
    16272  allocator->GetCurrentFrameIndex(),
    16273  *pVkMemoryRequirements,
    16274  *pCreateInfo,
    16275  (uint64_t)allocationCount,
    16276  pAllocations);
    16277  }
    16278 #endif
    16279 
    16280  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
    16281  {
    16282  for(size_t i = 0; i < allocationCount; ++i)
    16283  {
    16284  allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
    16285  }
    16286  }
    16287 
    16288  return result;
    16289 }
    16290 
    16292  VmaAllocator allocator,
    16293  VkBuffer buffer,
    16294  const VmaAllocationCreateInfo* pCreateInfo,
    16295  VmaAllocation* pAllocation,
    16296  VmaAllocationInfo* pAllocationInfo)
    16297 {
    16298  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    16299 
    16300  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
    16301 
    16302  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16303 
    16304  VkMemoryRequirements vkMemReq = {};
    16305  bool requiresDedicatedAllocation = false;
    16306  bool prefersDedicatedAllocation = false;
    16307  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
    16308  requiresDedicatedAllocation,
    16309  prefersDedicatedAllocation);
    16310 
    16311  VkResult result = allocator->AllocateMemory(
    16312  vkMemReq,
    16313  requiresDedicatedAllocation,
    16314  prefersDedicatedAllocation,
    16315  buffer, // dedicatedBuffer
    16316  VK_NULL_HANDLE, // dedicatedImage
    16317  *pCreateInfo,
    16318  VMA_SUBALLOCATION_TYPE_BUFFER,
    16319  1, // allocationCount
    16320  pAllocation);
    16321 
    16322 #if VMA_RECORDING_ENABLED
    16323  if(allocator->GetRecorder() != VMA_NULL)
    16324  {
    16325  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
    16326  allocator->GetCurrentFrameIndex(),
    16327  vkMemReq,
    16328  requiresDedicatedAllocation,
    16329  prefersDedicatedAllocation,
    16330  *pCreateInfo,
    16331  *pAllocation);
    16332  }
    16333 #endif
    16334 
    16335  if(pAllocationInfo && result == VK_SUCCESS)
    16336  {
    16337  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    16338  }
    16339 
    16340  return result;
    16341 }
    16342 
    16343 VkResult vmaAllocateMemoryForImage(
    16344  VmaAllocator allocator,
    16345  VkImage image,
    16346  const VmaAllocationCreateInfo* pCreateInfo,
    16347  VmaAllocation* pAllocation,
    16348  VmaAllocationInfo* pAllocationInfo)
    16349 {
    16350  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    16351 
    16352  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
    16353 
    16354  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16355 
    16356  VkMemoryRequirements vkMemReq = {};
    16357  bool requiresDedicatedAllocation = false;
    16358  bool prefersDedicatedAllocation = false;
    16359  allocator->GetImageMemoryRequirements(image, vkMemReq,
    16360  requiresDedicatedAllocation, prefersDedicatedAllocation);
    16361 
    16362  VkResult result = allocator->AllocateMemory(
    16363  vkMemReq,
    16364  requiresDedicatedAllocation,
    16365  prefersDedicatedAllocation,
    16366  VK_NULL_HANDLE, // dedicatedBuffer
    16367  image, // dedicatedImage
    16368  *pCreateInfo,
    16369  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
    16370  1, // allocationCount
    16371  pAllocation);
    16372 
    16373 #if VMA_RECORDING_ENABLED
    16374  if(allocator->GetRecorder() != VMA_NULL)
    16375  {
    16376  allocator->GetRecorder()->RecordAllocateMemoryForImage(
    16377  allocator->GetCurrentFrameIndex(),
    16378  vkMemReq,
    16379  requiresDedicatedAllocation,
    16380  prefersDedicatedAllocation,
    16381  *pCreateInfo,
    16382  *pAllocation);
    16383  }
    16384 #endif
    16385 
    16386  if(pAllocationInfo && result == VK_SUCCESS)
    16387  {
    16388  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    16389  }
    16390 
    16391  return result;
    16392 }
    16393 
    16394 void vmaFreeMemory(
    16395  VmaAllocator allocator,
    16396  VmaAllocation allocation)
    16397 {
    16398  VMA_ASSERT(allocator);
    16399 
    16400  if(allocation == VK_NULL_HANDLE)
    16401  {
    16402  return;
    16403  }
    16404 
    16405  VMA_DEBUG_LOG("vmaFreeMemory");
    16406 
    16407  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16408 
    16409 #if VMA_RECORDING_ENABLED
    16410  if(allocator->GetRecorder() != VMA_NULL)
    16411  {
    16412  allocator->GetRecorder()->RecordFreeMemory(
    16413  allocator->GetCurrentFrameIndex(),
    16414  allocation);
    16415  }
    16416 #endif
    16417 
    16418  allocator->FreeMemory(
    16419  1, // allocationCount
    16420  &allocation);
    16421 }
    16422 
    16423 void vmaFreeMemoryPages(
    16424  VmaAllocator allocator,
    16425  size_t allocationCount,
    16426  VmaAllocation* pAllocations)
    16427 {
    16428  if(allocationCount == 0)
    16429  {
    16430  return;
    16431  }
    16432 
    16433  VMA_ASSERT(allocator);
    16434 
    16435  VMA_DEBUG_LOG("vmaFreeMemoryPages");
    16436 
    16437  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16438 
    16439 #if VMA_RECORDING_ENABLED
    16440  if(allocator->GetRecorder() != VMA_NULL)
    16441  {
    16442  allocator->GetRecorder()->RecordFreeMemoryPages(
    16443  allocator->GetCurrentFrameIndex(),
    16444  (uint64_t)allocationCount,
    16445  pAllocations);
    16446  }
    16447 #endif
    16448 
    16449  allocator->FreeMemory(allocationCount, pAllocations);
    16450 }
    16451 
    16452 VkResult vmaResizeAllocation(
    16453  VmaAllocator allocator,
    16454  VmaAllocation allocation,
    16455  VkDeviceSize newSize)
    16456 {
    16457  VMA_ASSERT(allocator && allocation);
    16458 
    16459  VMA_DEBUG_LOG("vmaResizeAllocation");
    16460 
    16461  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16462 
    16463 #if VMA_RECORDING_ENABLED
    16464  if(allocator->GetRecorder() != VMA_NULL)
    16465  {
    16466  allocator->GetRecorder()->RecordResizeAllocation(
    16467  allocator->GetCurrentFrameIndex(),
    16468  allocation,
    16469  newSize);
    16470  }
    16471 #endif
    16472 
    16473  return allocator->ResizeAllocation(allocation, newSize);
    16474 }
    16475 
    16477  VmaAllocator allocator,
    16478  VmaAllocation allocation,
    16479  VmaAllocationInfo* pAllocationInfo)
    16480 {
    16481  VMA_ASSERT(allocator && allocation && pAllocationInfo);
    16482 
    16483  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16484 
    16485 #if VMA_RECORDING_ENABLED
    16486  if(allocator->GetRecorder() != VMA_NULL)
    16487  {
    16488  allocator->GetRecorder()->RecordGetAllocationInfo(
    16489  allocator->GetCurrentFrameIndex(),
    16490  allocation);
    16491  }
    16492 #endif
    16493 
    16494  allocator->GetAllocationInfo(allocation, pAllocationInfo);
    16495 }
    16496 
    16497 VkBool32 vmaTouchAllocation(
    16498  VmaAllocator allocator,
    16499  VmaAllocation allocation)
    16500 {
    16501  VMA_ASSERT(allocator && allocation);
    16502 
    16503  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16504 
    16505 #if VMA_RECORDING_ENABLED
    16506  if(allocator->GetRecorder() != VMA_NULL)
    16507  {
    16508  allocator->GetRecorder()->RecordTouchAllocation(
    16509  allocator->GetCurrentFrameIndex(),
    16510  allocation);
    16511  }
    16512 #endif
    16513 
    16514  return allocator->TouchAllocation(allocation);
    16515 }
    16516 
    16518  VmaAllocator allocator,
    16519  VmaAllocation allocation,
    16520  void* pUserData)
    16521 {
    16522  VMA_ASSERT(allocator && allocation);
    16523 
    16524  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16525 
    16526  allocation->SetUserData(allocator, pUserData);
    16527 
    16528 #if VMA_RECORDING_ENABLED
    16529  if(allocator->GetRecorder() != VMA_NULL)
    16530  {
    16531  allocator->GetRecorder()->RecordSetAllocationUserData(
    16532  allocator->GetCurrentFrameIndex(),
    16533  allocation,
    16534  pUserData);
    16535  }
    16536 #endif
    16537 }
    16538 
    16540  VmaAllocator allocator,
    16541  VmaAllocation* pAllocation)
    16542 {
    16543  VMA_ASSERT(allocator && pAllocation);
    16544 
    16545  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
    16546 
    16547  allocator->CreateLostAllocation(pAllocation);
    16548 
    16549 #if VMA_RECORDING_ENABLED
    16550  if(allocator->GetRecorder() != VMA_NULL)
    16551  {
    16552  allocator->GetRecorder()->RecordCreateLostAllocation(
    16553  allocator->GetCurrentFrameIndex(),
    16554  *pAllocation);
    16555  }
    16556 #endif
    16557 }
    16558 
    16559 VkResult vmaMapMemory(
    16560  VmaAllocator allocator,
    16561  VmaAllocation allocation,
    16562  void** ppData)
    16563 {
    16564  VMA_ASSERT(allocator && allocation && ppData);
    16565 
    16566  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16567 
    16568  VkResult res = allocator->Map(allocation, ppData);
    16569 
    16570 #if VMA_RECORDING_ENABLED
    16571  if(allocator->GetRecorder() != VMA_NULL)
    16572  {
    16573  allocator->GetRecorder()->RecordMapMemory(
    16574  allocator->GetCurrentFrameIndex(),
    16575  allocation);
    16576  }
    16577 #endif
    16578 
    16579  return res;
    16580 }
    16581 
    16582 void vmaUnmapMemory(
    16583  VmaAllocator allocator,
    16584  VmaAllocation allocation)
    16585 {
    16586  VMA_ASSERT(allocator && allocation);
    16587 
    16588  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16589 
    16590 #if VMA_RECORDING_ENABLED
    16591  if(allocator->GetRecorder() != VMA_NULL)
    16592  {
    16593  allocator->GetRecorder()->RecordUnmapMemory(
    16594  allocator->GetCurrentFrameIndex(),
    16595  allocation);
    16596  }
    16597 #endif
    16598 
    16599  allocator->Unmap(allocation);
    16600 }
    16601 
    16602 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    16603 {
    16604  VMA_ASSERT(allocator && allocation);
    16605 
    16606  VMA_DEBUG_LOG("vmaFlushAllocation");
    16607 
    16608  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16609 
    16610  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
    16611 
    16612 #if VMA_RECORDING_ENABLED
    16613  if(allocator->GetRecorder() != VMA_NULL)
    16614  {
    16615  allocator->GetRecorder()->RecordFlushAllocation(
    16616  allocator->GetCurrentFrameIndex(),
    16617  allocation, offset, size);
    16618  }
    16619 #endif
    16620 }
    16621 
    16622 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    16623 {
    16624  VMA_ASSERT(allocator && allocation);
    16625 
    16626  VMA_DEBUG_LOG("vmaInvalidateAllocation");
    16627 
    16628  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16629 
    16630  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
    16631 
    16632 #if VMA_RECORDING_ENABLED
    16633  if(allocator->GetRecorder() != VMA_NULL)
    16634  {
    16635  allocator->GetRecorder()->RecordInvalidateAllocation(
    16636  allocator->GetCurrentFrameIndex(),
    16637  allocation, offset, size);
    16638  }
    16639 #endif
    16640 }
    16641 
    16642 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
    16643 {
    16644  VMA_ASSERT(allocator);
    16645 
    16646  VMA_DEBUG_LOG("vmaCheckCorruption");
    16647 
    16648  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16649 
    16650  return allocator->CheckCorruption(memoryTypeBits);
    16651 }
    16652 
    16653 VkResult vmaDefragment(
    16654  VmaAllocator allocator,
    16655  VmaAllocation* pAllocations,
    16656  size_t allocationCount,
    16657  VkBool32* pAllocationsChanged,
    16658  const VmaDefragmentationInfo *pDefragmentationInfo,
    16659  VmaDefragmentationStats* pDefragmentationStats)
    16660 {
    16661  // Deprecated interface, reimplemented using new one.
    16662 
    16663  VmaDefragmentationInfo2 info2 = {};
    16664  info2.allocationCount = (uint32_t)allocationCount;
    16665  info2.pAllocations = pAllocations;
    16666  info2.pAllocationsChanged = pAllocationsChanged;
    16667  if(pDefragmentationInfo != VMA_NULL)
    16668  {
    16669  info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
    16670  info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove;
    16671  }
    16672  else
    16673  {
    16674  info2.maxCpuAllocationsToMove = UINT32_MAX;
    16675  info2.maxCpuBytesToMove = VK_WHOLE_SIZE;
    16676  }
    16677  // info2.flags, maxGpuAllocationsToMove, maxGpuBytesToMove, commandBuffer deliberately left zero.
    16678 
    16680  VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx);
    16681  if(res == VK_NOT_READY)
    16682  {
    16683  res = vmaDefragmentationEnd( allocator, ctx);
    16684  }
    16685  return res;
    16686 }
    16687 
    16688 VkResult vmaDefragmentationBegin(
    16689  VmaAllocator allocator,
    16690  const VmaDefragmentationInfo2* pInfo,
    16691  VmaDefragmentationStats* pStats,
    16692  VmaDefragmentationContext *pContext)
    16693 {
    16694  VMA_ASSERT(allocator && pInfo && pContext);
    16695 
    16696  // Degenerate case: Nothing to defragment.
    16697  if(pInfo->allocationCount == 0 && pInfo->poolCount == 0)
    16698  {
    16699  return VK_SUCCESS;
    16700  }
    16701 
    16702  VMA_ASSERT(pInfo->allocationCount == 0 || pInfo->pAllocations != VMA_NULL);
    16703  VMA_ASSERT(pInfo->poolCount == 0 || pInfo->pPools != VMA_NULL);
    16704  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->allocationCount, pInfo->pAllocations));
    16705  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->poolCount, pInfo->pPools));
    16706 
    16707  VMA_DEBUG_LOG("vmaDefragmentationBegin");
    16708 
    16709  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16710 
    16711  VkResult res = allocator->DefragmentationBegin(*pInfo, pStats, pContext);
    16712 
    16713 #if VMA_RECORDING_ENABLED
    16714  if(allocator->GetRecorder() != VMA_NULL)
    16715  {
    16716  allocator->GetRecorder()->RecordDefragmentationBegin(
    16717  allocator->GetCurrentFrameIndex(), *pInfo, *pContext);
    16718  }
    16719 #endif
    16720 
    16721  return res;
    16722 }
    16723 
    16724 VkResult vmaDefragmentationEnd(
    16725  VmaAllocator allocator,
    16726  VmaDefragmentationContext context)
    16727 {
    16728  VMA_ASSERT(allocator);
    16729 
    16730  VMA_DEBUG_LOG("vmaDefragmentationEnd");
    16731 
    16732  if(context != VK_NULL_HANDLE)
    16733  {
    16734  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16735 
    16736 #if VMA_RECORDING_ENABLED
    16737  if(allocator->GetRecorder() != VMA_NULL)
    16738  {
    16739  allocator->GetRecorder()->RecordDefragmentationEnd(
    16740  allocator->GetCurrentFrameIndex(), context);
    16741  }
    16742 #endif
    16743 
    16744  return allocator->DefragmentationEnd(context);
    16745  }
    16746  else
    16747  {
    16748  return VK_SUCCESS;
    16749  }
    16750 }
    16751 
    16752 VkResult vmaBindBufferMemory(
    16753  VmaAllocator allocator,
    16754  VmaAllocation allocation,
    16755  VkBuffer buffer)
    16756 {
    16757  VMA_ASSERT(allocator && allocation && buffer);
    16758 
    16759  VMA_DEBUG_LOG("vmaBindBufferMemory");
    16760 
    16761  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16762 
    16763  return allocator->BindBufferMemory(allocation, buffer);
    16764 }
    16765 
    16766 VkResult vmaBindImageMemory(
    16767  VmaAllocator allocator,
    16768  VmaAllocation allocation,
    16769  VkImage image)
    16770 {
    16771  VMA_ASSERT(allocator && allocation && image);
    16772 
    16773  VMA_DEBUG_LOG("vmaBindImageMemory");
    16774 
    16775  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16776 
    16777  return allocator->BindImageMemory(allocation, image);
    16778 }
    16779 
    16780 VkResult vmaCreateBuffer(
    16781  VmaAllocator allocator,
    16782  const VkBufferCreateInfo* pBufferCreateInfo,
    16783  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    16784  VkBuffer* pBuffer,
    16785  VmaAllocation* pAllocation,
    16786  VmaAllocationInfo* pAllocationInfo)
    16787 {
    16788  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
    16789 
    16790  if(pBufferCreateInfo->size == 0)
    16791  {
    16792  return VK_ERROR_VALIDATION_FAILED_EXT;
    16793  }
    16794 
    16795  VMA_DEBUG_LOG("vmaCreateBuffer");
    16796 
    16797  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16798 
    16799  *pBuffer = VK_NULL_HANDLE;
    16800  *pAllocation = VK_NULL_HANDLE;
    16801 
    16802  // 1. Create VkBuffer.
    16803  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
    16804  allocator->m_hDevice,
    16805  pBufferCreateInfo,
    16806  allocator->GetAllocationCallbacks(),
    16807  pBuffer);
    16808  if(res >= 0)
    16809  {
    16810  // 2. vkGetBufferMemoryRequirements.
    16811  VkMemoryRequirements vkMemReq = {};
    16812  bool requiresDedicatedAllocation = false;
    16813  bool prefersDedicatedAllocation = false;
    16814  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
    16815  requiresDedicatedAllocation, prefersDedicatedAllocation);
    16816 
    16817  // Make sure alignment requirements for specific buffer usages reported
    16818  // in Physical Device Properties are included in alignment reported by memory requirements.
    16819  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
    16820  {
    16821  VMA_ASSERT(vkMemReq.alignment %
    16822  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
    16823  }
    16824  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
    16825  {
    16826  VMA_ASSERT(vkMemReq.alignment %
    16827  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
    16828  }
    16829  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
    16830  {
    16831  VMA_ASSERT(vkMemReq.alignment %
    16832  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
    16833  }
    16834 
    16835  // 3. Allocate memory using allocator.
    16836  res = allocator->AllocateMemory(
    16837  vkMemReq,
    16838  requiresDedicatedAllocation,
    16839  prefersDedicatedAllocation,
    16840  *pBuffer, // dedicatedBuffer
    16841  VK_NULL_HANDLE, // dedicatedImage
    16842  *pAllocationCreateInfo,
    16843  VMA_SUBALLOCATION_TYPE_BUFFER,
    16844  1, // allocationCount
    16845  pAllocation);
    16846 
    16847 #if VMA_RECORDING_ENABLED
    16848  if(allocator->GetRecorder() != VMA_NULL)
    16849  {
    16850  allocator->GetRecorder()->RecordCreateBuffer(
    16851  allocator->GetCurrentFrameIndex(),
    16852  *pBufferCreateInfo,
    16853  *pAllocationCreateInfo,
    16854  *pAllocation);
    16855  }
    16856 #endif
    16857 
    16858  if(res >= 0)
    16859  {
    16860  // 3. Bind buffer with memory.
    16861  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
    16862  {
    16863  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
    16864  }
    16865  if(res >= 0)
    16866  {
    16867  // All steps succeeded.
    16868  #if VMA_STATS_STRING_ENABLED
    16869  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
    16870  #endif
    16871  if(pAllocationInfo != VMA_NULL)
    16872  {
    16873  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    16874  }
    16875 
    16876  return VK_SUCCESS;
    16877  }
    16878  allocator->FreeMemory(
    16879  1, // allocationCount
    16880  pAllocation);
    16881  *pAllocation = VK_NULL_HANDLE;
    16882  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    16883  *pBuffer = VK_NULL_HANDLE;
    16884  return res;
    16885  }
    16886  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    16887  *pBuffer = VK_NULL_HANDLE;
    16888  return res;
    16889  }
    16890  return res;
    16891 }
    16892 
    16893 void vmaDestroyBuffer(
    16894  VmaAllocator allocator,
    16895  VkBuffer buffer,
    16896  VmaAllocation allocation)
    16897 {
    16898  VMA_ASSERT(allocator);
    16899 
    16900  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    16901  {
    16902  return;
    16903  }
    16904 
    16905  VMA_DEBUG_LOG("vmaDestroyBuffer");
    16906 
    16907  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16908 
    16909 #if VMA_RECORDING_ENABLED
    16910  if(allocator->GetRecorder() != VMA_NULL)
    16911  {
    16912  allocator->GetRecorder()->RecordDestroyBuffer(
    16913  allocator->GetCurrentFrameIndex(),
    16914  allocation);
    16915  }
    16916 #endif
    16917 
    16918  if(buffer != VK_NULL_HANDLE)
    16919  {
    16920  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
    16921  }
    16922 
    16923  if(allocation != VK_NULL_HANDLE)
    16924  {
    16925  allocator->FreeMemory(
    16926  1, // allocationCount
    16927  &allocation);
    16928  }
    16929 }
    16930 
    16931 VkResult vmaCreateImage(
    16932  VmaAllocator allocator,
    16933  const VkImageCreateInfo* pImageCreateInfo,
    16934  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    16935  VkImage* pImage,
    16936  VmaAllocation* pAllocation,
    16937  VmaAllocationInfo* pAllocationInfo)
    16938 {
    16939  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
    16940 
    16941  if(pImageCreateInfo->extent.width == 0 ||
    16942  pImageCreateInfo->extent.height == 0 ||
    16943  pImageCreateInfo->extent.depth == 0 ||
    16944  pImageCreateInfo->mipLevels == 0 ||
    16945  pImageCreateInfo->arrayLayers == 0)
    16946  {
    16947  return VK_ERROR_VALIDATION_FAILED_EXT;
    16948  }
    16949 
    16950  VMA_DEBUG_LOG("vmaCreateImage");
    16951 
    16952  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16953 
    16954  *pImage = VK_NULL_HANDLE;
    16955  *pAllocation = VK_NULL_HANDLE;
    16956 
    16957  // 1. Create VkImage.
    16958  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
    16959  allocator->m_hDevice,
    16960  pImageCreateInfo,
    16961  allocator->GetAllocationCallbacks(),
    16962  pImage);
    16963  if(res >= 0)
    16964  {
    16965  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
    16966  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
    16967  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
    16968 
    16969  // 2. Allocate memory using allocator.
    16970  VkMemoryRequirements vkMemReq = {};
    16971  bool requiresDedicatedAllocation = false;
    16972  bool prefersDedicatedAllocation = false;
    16973  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
    16974  requiresDedicatedAllocation, prefersDedicatedAllocation);
    16975 
    16976  res = allocator->AllocateMemory(
    16977  vkMemReq,
    16978  requiresDedicatedAllocation,
    16979  prefersDedicatedAllocation,
    16980  VK_NULL_HANDLE, // dedicatedBuffer
    16981  *pImage, // dedicatedImage
    16982  *pAllocationCreateInfo,
    16983  suballocType,
    16984  1, // allocationCount
    16985  pAllocation);
    16986 
    16987 #if VMA_RECORDING_ENABLED
    16988  if(allocator->GetRecorder() != VMA_NULL)
    16989  {
    16990  allocator->GetRecorder()->RecordCreateImage(
    16991  allocator->GetCurrentFrameIndex(),
    16992  *pImageCreateInfo,
    16993  *pAllocationCreateInfo,
    16994  *pAllocation);
    16995  }
    16996 #endif
    16997 
    16998  if(res >= 0)
    16999  {
    17000  // 3. Bind image with memory.
    17001  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
    17002  {
    17003  res = allocator->BindImageMemory(*pAllocation, *pImage);
    17004  }
    17005  if(res >= 0)
    17006  {
    17007  // All steps succeeded.
    17008  #if VMA_STATS_STRING_ENABLED
    17009  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
    17010  #endif
    17011  if(pAllocationInfo != VMA_NULL)
    17012  {
    17013  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    17014  }
    17015 
    17016  return VK_SUCCESS;
    17017  }
    17018  allocator->FreeMemory(
    17019  1, // allocationCount
    17020  pAllocation);
    17021  *pAllocation = VK_NULL_HANDLE;
    17022  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    17023  *pImage = VK_NULL_HANDLE;
    17024  return res;
    17025  }
    17026  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    17027  *pImage = VK_NULL_HANDLE;
    17028  return res;
    17029  }
    17030  return res;
    17031 }
    17032 
    17033 void vmaDestroyImage(
    17034  VmaAllocator allocator,
    17035  VkImage image,
    17036  VmaAllocation allocation)
    17037 {
    17038  VMA_ASSERT(allocator);
    17039 
    17040  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    17041  {
    17042  return;
    17043  }
    17044 
    17045  VMA_DEBUG_LOG("vmaDestroyImage");
    17046 
    17047  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    17048 
    17049 #if VMA_RECORDING_ENABLED
    17050  if(allocator->GetRecorder() != VMA_NULL)
    17051  {
    17052  allocator->GetRecorder()->RecordDestroyImage(
    17053  allocator->GetCurrentFrameIndex(),
    17054  allocation);
    17055  }
    17056 #endif
    17057 
    17058  if(image != VK_NULL_HANDLE)
    17059  {
    17060  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
    17061  }
    17062  if(allocation != VK_NULL_HANDLE)
    17063  {
    17064  allocator->FreeMemory(
    17065  1, // allocationCount
    17066  &allocation);
    17067  }
    17068 }
    17069 
    17070 #endif // #ifdef VMA_IMPLEMENTATION
    PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
    Definition: vk_mem_alloc.h:1787
    +
    Set this flag if the allocation should have its own memory block.
    Definition: vk_mem_alloc.h:2087
    void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
    Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
    -
    VkPhysicalDevice physicalDevice
    Vulkan physical device.
    Definition: vk_mem_alloc.h:1844
    -
    uint32_t maxCpuAllocationsToMove
    Maximum number of allocations that can be moved to a different place using transfers on CPU side,...
    Definition: vk_mem_alloc.h:2897
    +
    VkPhysicalDevice physicalDevice
    Vulkan physical device.
    Definition: vk_mem_alloc.h:1845
    +
    uint32_t maxCpuAllocationsToMove
    Maximum number of allocations that can be moved to a different place using transfers on CPU side,...
    Definition: vk_mem_alloc.h:2898
    VkResult vmaDefragment(VmaAllocator allocator, VmaAllocation *pAllocations, size_t allocationCount, VkBool32 *pAllocationsChanged, const VmaDefragmentationInfo *pDefragmentationInfo, VmaDefragmentationStats *pDefragmentationStats)
    Deprecated. Compacts memory by moving allocations.
    void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    Invalidates memory of given allocation.
    Represents single memory allocation.
    -
    Definition: vk_mem_alloc.h:1818
    -
    size_t blockCount
    Number of VkDeviceMemory blocks allocated for this pool.
    Definition: vk_mem_alloc.h:2417
    -
    PFN_vkCreateBuffer vkCreateBuffer
    Definition: vk_mem_alloc.h:1798
    +
    Definition: vk_mem_alloc.h:1819
    +
    size_t blockCount
    Number of VkDeviceMemory blocks allocated for this pool.
    Definition: vk_mem_alloc.h:2418
    +
    PFN_vkCreateBuffer vkCreateBuffer
    Definition: vk_mem_alloc.h:1799
    void vmaFreeStatsString(VmaAllocator allocator, char *pStatsString)
    struct VmaStats VmaStats
    General statistics from current state of Allocator.
    -
    Definition: vk_mem_alloc.h:2048
    -
    Definition: vk_mem_alloc.h:2152
    -
    VmaDefragmentationFlags flags
    Reserved for future use. Should be 0.
    Definition: vk_mem_alloc.h:2850
    -
    PFN_vkMapMemory vkMapMemory
    Definition: vk_mem_alloc.h:1790
    -
    VkDeviceMemory deviceMemory
    Handle to Vulkan memory object.
    Definition: vk_mem_alloc.h:2517
    -
    VmaAllocatorCreateFlags flags
    Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
    Definition: vk_mem_alloc.h:1841
    -
    uint32_t maxAllocationsToMove
    Maximum number of allocations that can be moved to different place.
    Definition: vk_mem_alloc.h:2933
    -
    Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
    Definition: vk_mem_alloc.h:2306
    -
    #define VMA_RECORDING_ENABLED
    Definition: vk_mem_alloc.h:1685
    +
    Definition: vk_mem_alloc.h:2049
    +
    Definition: vk_mem_alloc.h:2153
    +
    VmaDefragmentationFlags flags
    Reserved for future use. Should be 0.
    Definition: vk_mem_alloc.h:2851
    +
    PFN_vkMapMemory vkMapMemory
    Definition: vk_mem_alloc.h:1791
    +
    VkDeviceMemory deviceMemory
    Handle to Vulkan memory object.
    Definition: vk_mem_alloc.h:2518
    +
    VmaAllocatorCreateFlags flags
    Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
    Definition: vk_mem_alloc.h:1842
    +
    uint32_t maxAllocationsToMove
    Maximum number of allocations that can be moved to different place.
    Definition: vk_mem_alloc.h:2934
    +
    Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
    Definition: vk_mem_alloc.h:2307
    +
    #define VMA_RECORDING_ENABLED
    Definition: vk_mem_alloc.h:1686
    void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
    Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInf...
    -
    VkDeviceSize size
    Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
    Definition: vk_mem_alloc.h:2398
    -
    Definition: vk_mem_alloc.h:2123
    -
    uint32_t allocationCount
    Number of allocations in pAllocations array.
    Definition: vk_mem_alloc.h:2853
    -
    VkFlags VmaAllocatorCreateFlags
    Definition: vk_mem_alloc.h:1779
    -
    VkMemoryPropertyFlags preferredFlags
    Flags that preferably should be set in a memory type chosen for an allocation.
    Definition: vk_mem_alloc.h:2205
    -
    Definition: vk_mem_alloc.h:2075
    -
    const VkAllocationCallbacks * pAllocationCallbacks
    Custom CPU memory allocation callbacks. Optional.
    Definition: vk_mem_alloc.h:1853
    -
    Enables alternative, buddy allocation algorithm in this pool.
    Definition: vk_mem_alloc.h:2334
    +
    VkDeviceSize size
    Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
    Definition: vk_mem_alloc.h:2399
    +
    Definition: vk_mem_alloc.h:2124
    +
    uint32_t allocationCount
    Number of allocations in pAllocations array.
    Definition: vk_mem_alloc.h:2854
    +
    VkFlags VmaAllocatorCreateFlags
    Definition: vk_mem_alloc.h:1780
    +
    VkMemoryPropertyFlags preferredFlags
    Flags that preferably should be set in a memory type chosen for an allocation.
    Definition: vk_mem_alloc.h:2206
    +
    Definition: vk_mem_alloc.h:2076
    +
    const VkAllocationCallbacks * pAllocationCallbacks
    Custom CPU memory allocation callbacks. Optional.
    Definition: vk_mem_alloc.h:1854
    +
    Enables alternative, buddy allocation algorithm in this pool.
    Definition: vk_mem_alloc.h:2335
    void vmaCalculateStats(VmaAllocator allocator, VmaStats *pStats)
    Retrieves statistics from current state of the Allocator.
    -
    const VmaVulkanFunctions * pVulkanFunctions
    Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1.
    Definition: vk_mem_alloc.h:1907
    -
    Description of a Allocator to be created.
    Definition: vk_mem_alloc.h:1838
    +
    const VmaVulkanFunctions * pVulkanFunctions
    Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1.
    Definition: vk_mem_alloc.h:1908
    +
    Description of a Allocator to be created.
    Definition: vk_mem_alloc.h:1839
    void vmaDestroyAllocator(VmaAllocator allocator)
    Destroys allocator object.
    -
    VmaAllocationCreateFlagBits
    Flags to be passed as VmaAllocationCreateInfo::flags.
    Definition: vk_mem_alloc.h:2079
    +
    VmaAllocationCreateFlagBits
    Flags to be passed as VmaAllocationCreateInfo::flags.
    Definition: vk_mem_alloc.h:2080
    void vmaGetAllocationInfo(VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)
    Returns current information about specified allocation and atomically marks it as used in current fra...
    -
    VkDeviceSize allocationSizeMax
    Definition: vk_mem_alloc.h:1979
    -
    PFN_vkBindImageMemory vkBindImageMemory
    Definition: vk_mem_alloc.h:1795
    -
    VmaPool * pPools
    Either null or pointer to array of pools to be defragmented.
    Definition: vk_mem_alloc.h:2887
    -
    VkDeviceSize unusedBytes
    Total number of bytes occupied by unused ranges.
    Definition: vk_mem_alloc.h:1978
    -
    Statistics returned by function vmaDefragment().
    Definition: vk_mem_alloc.h:2937
    +
    VkDeviceSize allocationSizeMax
    Definition: vk_mem_alloc.h:1980
    +
    PFN_vkBindImageMemory vkBindImageMemory
    Definition: vk_mem_alloc.h:1796
    +
    VmaPool * pPools
    Either null or pointer to array of pools to be defragmented.
    Definition: vk_mem_alloc.h:2888
    +
    VkDeviceSize unusedBytes
    Total number of bytes occupied by unused ranges.
    Definition: vk_mem_alloc.h:1979
    +
    Statistics returned by function vmaDefragment().
    Definition: vk_mem_alloc.h:2938
    void vmaFreeMemory(VmaAllocator allocator, VmaAllocation allocation)
    Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(),...
    -
    uint32_t frameInUseCount
    Maximum number of additional frames that are in use at the same time as current frame.
    Definition: vk_mem_alloc.h:1870
    -
    VmaStatInfo total
    Definition: vk_mem_alloc.h:1988
    -
    uint32_t deviceMemoryBlocksFreed
    Number of empty VkDeviceMemory objects that have been released to the system.
    Definition: vk_mem_alloc.h:2945
    -
    VmaAllocationCreateFlags flags
    Use VmaAllocationCreateFlagBits enum.
    Definition: vk_mem_alloc.h:2189
    -
    Definition: vk_mem_alloc.h:2147
    -
    VkDeviceSize maxBytesToMove
    Maximum total numbers of bytes that can be copied while moving allocations to different places.
    Definition: vk_mem_alloc.h:2928
    -
    PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
    Definition: vk_mem_alloc.h:1796
    -
    void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    Callback function called after successful vkAllocateMemory.
    Definition: vk_mem_alloc.h:1721
    +
    uint32_t frameInUseCount
    Maximum number of additional frames that are in use at the same time as current frame.
    Definition: vk_mem_alloc.h:1871
    +
    VmaStatInfo total
    Definition: vk_mem_alloc.h:1989
    +
    uint32_t deviceMemoryBlocksFreed
    Number of empty VkDeviceMemory objects that have been released to the system.
    Definition: vk_mem_alloc.h:2946
    +
    VmaAllocationCreateFlags flags
    Use VmaAllocationCreateFlagBits enum.
    Definition: vk_mem_alloc.h:2190
    +
    Definition: vk_mem_alloc.h:2148
    +
    VkDeviceSize maxBytesToMove
    Maximum total numbers of bytes that can be copied while moving allocations to different places.
    Definition: vk_mem_alloc.h:2929
    +
    PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
    Definition: vk_mem_alloc.h:1797
    +
    void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    Callback function called after successful vkAllocateMemory.
    Definition: vk_mem_alloc.h:1722
    Represents main object of this library initialized.
    -
    VkDevice device
    Vulkan device.
    Definition: vk_mem_alloc.h:1847
    +
    VkDevice device
    Vulkan device.
    Definition: vk_mem_alloc.h:1848
    void vmaFreeMemoryPages(VmaAllocator allocator, size_t allocationCount, VmaAllocation *pAllocations)
    Frees memory and destroys multiple allocations.
    VkResult vmaBindBufferMemory(VmaAllocator allocator, VmaAllocation allocation, VkBuffer buffer)
    Binds buffer to allocation.
    -
    Describes parameter of created VmaPool.
    Definition: vk_mem_alloc.h:2348
    -
    Definition: vk_mem_alloc.h:2342
    -
    PFN_vkCmdCopyBuffer vkCmdCopyBuffer
    Definition: vk_mem_alloc.h:1802
    -
    const VmaRecordSettings * pRecordSettings
    Parameters for recording of VMA calls. Can be null.
    Definition: vk_mem_alloc.h:1914
    -
    VkDeviceSize size
    Size of this allocation, in bytes.
    Definition: vk_mem_alloc.h:2527
    +
    Describes parameter of created VmaPool.
    Definition: vk_mem_alloc.h:2349
    +
    Definition: vk_mem_alloc.h:2343
    +
    PFN_vkCmdCopyBuffer vkCmdCopyBuffer
    Definition: vk_mem_alloc.h:1803
    +
    const VmaRecordSettings * pRecordSettings
    Parameters for recording of VMA calls. Can be null.
    Definition: vk_mem_alloc.h:1915
    +
    VkDeviceSize size
    Size of this allocation, in bytes.
    Definition: vk_mem_alloc.h:2528
    void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
    Given Memory Type Index, returns Property Flags of this memory type.
    -
    PFN_vkUnmapMemory vkUnmapMemory
    Definition: vk_mem_alloc.h:1791
    +
    PFN_vkUnmapMemory vkUnmapMemory
    Definition: vk_mem_alloc.h:1792
    VkResult vmaDefragmentationBegin(VmaAllocator allocator, const VmaDefragmentationInfo2 *pInfo, VmaDefragmentationStats *pStats, VmaDefragmentationContext *pContext)
    Begins defragmentation process.
    -
    Enables flush after recording every function call.
    Definition: vk_mem_alloc.h:1816
    -
    void * pUserData
    Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
    Definition: vk_mem_alloc.h:2226
    -
    size_t minBlockCount
    Minimum number of blocks to be always allocated in this pool, even if they stay empty.
    Definition: vk_mem_alloc.h:2368
    -
    size_t allocationCount
    Number of VmaAllocation objects created from this pool that were not destroyed or lost.
    Definition: vk_mem_alloc.h:2404
    +
    Enables flush after recording every function call.
    Definition: vk_mem_alloc.h:1817
    +
    void * pUserData
    Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
    Definition: vk_mem_alloc.h:2227
    +
    size_t minBlockCount
    Minimum number of blocks to be always allocated in this pool, even if they stay empty.
    Definition: vk_mem_alloc.h:2369
    +
    size_t allocationCount
    Number of VmaAllocation objects created from this pool that were not destroyed or lost.
    Definition: vk_mem_alloc.h:2405
    struct VmaVulkanFunctions VmaVulkanFunctions
    Pointers to some Vulkan functions - a subset used by the library.
    -
    Definition: vk_mem_alloc.h:1777
    -
    uint32_t memoryTypeIndex
    Vulkan memory type index to allocate this pool from.
    Definition: vk_mem_alloc.h:2351
    +
    Definition: vk_mem_alloc.h:1778
    +
    uint32_t memoryTypeIndex
    Vulkan memory type index to allocate this pool from.
    Definition: vk_mem_alloc.h:2352
    VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
    Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
    -
    VkDeviceSize maxGpuBytesToMove
    Maximum total numbers of bytes that can be copied while moving allocations to different places using ...
    Definition: vk_mem_alloc.h:2902
    -
    VmaMemoryUsage
    Definition: vk_mem_alloc.h:2026
    +
    VkDeviceSize maxGpuBytesToMove
    Maximum total numbers of bytes that can be copied while moving allocations to different places using ...
    Definition: vk_mem_alloc.h:2903
    +
    VmaMemoryUsage
    Definition: vk_mem_alloc.h:2027
    struct VmaAllocationInfo VmaAllocationInfo
    Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
    -
    VmaAllocation * pAllocations
    Pointer to array of allocations that can be defragmented.
    Definition: vk_mem_alloc.h:2862
    +
    VmaAllocation * pAllocations
    Pointer to array of allocations that can be defragmented.
    Definition: vk_mem_alloc.h:2863
    void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    Flushes memory of given allocation.
    -
    Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
    Definition: vk_mem_alloc.h:2923
    +
    Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
    Definition: vk_mem_alloc.h:2924
    struct VmaPoolCreateInfo VmaPoolCreateInfo
    Describes parameter of created VmaPool.
    void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
    Destroys VmaPool object and frees Vulkan device memory.
    -
    VkDeviceSize bytesFreed
    Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects.
    Definition: vk_mem_alloc.h:2941
    -
    Definition: vk_mem_alloc.h:2065
    -
    uint32_t memoryTypeBits
    Bitmask containing one bit set for every memory type acceptable for this allocation.
    Definition: vk_mem_alloc.h:2213
    -
    PFN_vkBindBufferMemory vkBindBufferMemory
    Definition: vk_mem_alloc.h:1794
    +
    VkDeviceSize bytesFreed
    Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects.
    Definition: vk_mem_alloc.h:2942
    +
    Definition: vk_mem_alloc.h:2066
    +
    uint32_t memoryTypeBits
    Bitmask containing one bit set for every memory type acceptable for this allocation.
    Definition: vk_mem_alloc.h:2214
    +
    PFN_vkBindBufferMemory vkBindBufferMemory
    Definition: vk_mem_alloc.h:1795
    Represents custom memory pool.
    void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
    Retrieves statistics of existing VmaPool object.
    struct VmaDefragmentationInfo VmaDefragmentationInfo
    Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
    VkResult vmaDefragmentationEnd(VmaAllocator allocator, VmaDefragmentationContext context)
    Ends defragmentation process.
    -
    General statistics from current state of Allocator.
    Definition: vk_mem_alloc.h:1984
    -
    void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    Callback function called before vkFreeMemory.
    Definition: vk_mem_alloc.h:1727
    -
    VkFlags VmaDefragmentationFlags
    Definition: vk_mem_alloc.h:2841
    +
    General statistics from current state of Allocator.
    Definition: vk_mem_alloc.h:1985
    +
    void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    Callback function called before vkFreeMemory.
    Definition: vk_mem_alloc.h:1728
    +
    VkFlags VmaDefragmentationFlags
    Definition: vk_mem_alloc.h:2842
    void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
    Sets pUserData in given allocation to new value.
    -
    Definition: vk_mem_alloc.h:2839
    - -
    VkBool32 * pAllocationsChanged
    Optional, output. Pointer to array that will be filled with information whether the allocation at cer...
    Definition: vk_mem_alloc.h:2868
    +
    Definition: vk_mem_alloc.h:2840
    + +
    VkBool32 * pAllocationsChanged
    Optional, output. Pointer to array that will be filled with information whether the allocation at cer...
    Definition: vk_mem_alloc.h:2869
    VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
    Allocates Vulkan device memory and creates VmaPool object.
    -
    VmaAllocatorCreateFlagBits
    Flags for created VmaAllocator.
    Definition: vk_mem_alloc.h:1748
    +
    VmaAllocatorCreateFlagBits
    Flags for created VmaAllocator.
    Definition: vk_mem_alloc.h:1749
    VkResult vmaBindImageMemory(VmaAllocator allocator, VmaAllocation allocation, VkImage image)
    Binds image to allocation.
    struct VmaStatInfo VmaStatInfo
    Calculated statistics of memory usage in entire allocator.
    -
    VkFlags VmaRecordFlags
    Definition: vk_mem_alloc.h:1820
    -
    Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
    Definition: vk_mem_alloc.h:1753
    -
    uint32_t allocationsMoved
    Number of allocations that have been moved to different places.
    Definition: vk_mem_alloc.h:2943
    +
    VkFlags VmaRecordFlags
    Definition: vk_mem_alloc.h:1821
    +
    Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
    Definition: vk_mem_alloc.h:1754
    +
    uint32_t allocationsMoved
    Number of allocations that have been moved to different places.
    Definition: vk_mem_alloc.h:2944
    void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
    Creates new allocation that is in lost state from the beginning.
    -
    VkMemoryPropertyFlags requiredFlags
    Flags that must be set in a Memory Type chosen for an allocation.
    Definition: vk_mem_alloc.h:2200
    -
    VkDeviceSize unusedRangeSizeMax
    Size of the largest continuous free memory region available for new allocation.
    Definition: vk_mem_alloc.h:2414
    +
    VkMemoryPropertyFlags requiredFlags
    Flags that must be set in a Memory Type chosen for an allocation.
    Definition: vk_mem_alloc.h:2201
    +
    VkDeviceSize unusedRangeSizeMax
    Size of the largest continuous free memory region available for new allocation.
    Definition: vk_mem_alloc.h:2415
    void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
    Builds and returns statistics as string in JSON format.
    -
    PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
    Definition: vk_mem_alloc.h:1787
    -
    Calculated statistics of memory usage in entire allocator.
    Definition: vk_mem_alloc.h:1967
    -
    VkDeviceSize blockSize
    Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes....
    Definition: vk_mem_alloc.h:2363
    -
    Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
    Definition: vk_mem_alloc.h:1740
    -
    Definition: vk_mem_alloc.h:2338
    +
    PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
    Definition: vk_mem_alloc.h:1788
    +
    Calculated statistics of memory usage in entire allocator.
    Definition: vk_mem_alloc.h:1968
    +
    VkDeviceSize blockSize
    Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes....
    Definition: vk_mem_alloc.h:2364
    +
    Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
    Definition: vk_mem_alloc.h:1741
    +
    Definition: vk_mem_alloc.h:2339
    VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    -
    Definition: vk_mem_alloc.h:2130
    +
    Definition: vk_mem_alloc.h:2131
    Represents Opaque object that represents started defragmentation process.
    -
    VkDeviceSize unusedRangeSizeMin
    Definition: vk_mem_alloc.h:1980
    -
    PFN_vmaFreeDeviceMemoryFunction pfnFree
    Optional, can be null.
    Definition: vk_mem_alloc.h:1744
    -
    Definition: vk_mem_alloc.h:2163
    -
    VmaPoolCreateFlags flags
    Use combination of VmaPoolCreateFlagBits.
    Definition: vk_mem_alloc.h:2354
    -
    Definition: vk_mem_alloc.h:2074
    -
    PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges
    Definition: vk_mem_alloc.h:1793
    +
    VkDeviceSize unusedRangeSizeMin
    Definition: vk_mem_alloc.h:1981
    +
    PFN_vmaFreeDeviceMemoryFunction pfnFree
    Optional, can be null.
    Definition: vk_mem_alloc.h:1745
    +
    Definition: vk_mem_alloc.h:2164
    +
    VmaPoolCreateFlags flags
    Use combination of VmaPoolCreateFlagBits.
    Definition: vk_mem_alloc.h:2355
    +
    Definition: vk_mem_alloc.h:2075
    +
    PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges
    Definition: vk_mem_alloc.h:1794
    struct VmaPoolStats VmaPoolStats
    Describes parameter of existing VmaPool.
    VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    Function similar to vmaCreateBuffer().
    -
    VmaMemoryUsage usage
    Intended usage of memory.
    Definition: vk_mem_alloc.h:2195
    -
    Definition: vk_mem_alloc.h:2186
    +
    VmaMemoryUsage usage
    Intended usage of memory.
    Definition: vk_mem_alloc.h:2196
    +
    Definition: vk_mem_alloc.h:2187
    VkResult vmaFindMemoryTypeIndexForImageInfo(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
    Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
    -
    uint32_t blockCount
    Number of VkDeviceMemory Vulkan memory blocks allocated.
    Definition: vk_mem_alloc.h:1970
    -
    PFN_vkFreeMemory vkFreeMemory
    Definition: vk_mem_alloc.h:1789
    -
    size_t maxBlockCount
    Maximum number of blocks that can be allocated in this pool. Optional.
    Definition: vk_mem_alloc.h:2376
    -
    const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
    Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
    Definition: vk_mem_alloc.h:1856
    -
    size_t unusedRangeCount
    Number of continuous memory ranges in the pool not used by any VmaAllocation.
    Definition: vk_mem_alloc.h:2407
    -
    VkFlags VmaAllocationCreateFlags
    Definition: vk_mem_alloc.h:2184
    -
    VkDeviceSize maxCpuBytesToMove
    Maximum total numbers of bytes that can be copied while moving allocations to different places using ...
    Definition: vk_mem_alloc.h:2892
    -
    VmaPool pool
    Pool that this allocation should be created in.
    Definition: vk_mem_alloc.h:2219
    +
    uint32_t blockCount
    Number of VkDeviceMemory Vulkan memory blocks allocated.
    Definition: vk_mem_alloc.h:1971
    +
    PFN_vkFreeMemory vkFreeMemory
    Definition: vk_mem_alloc.h:1790
    +
    size_t maxBlockCount
    Maximum number of blocks that can be allocated in this pool. Optional.
    Definition: vk_mem_alloc.h:2377
    +
    const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
    Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
    Definition: vk_mem_alloc.h:1857
    +
    size_t unusedRangeCount
    Number of continuous memory ranges in the pool not used by any VmaAllocation.
    Definition: vk_mem_alloc.h:2408
    +
    VkFlags VmaAllocationCreateFlags
    Definition: vk_mem_alloc.h:2185
    +
    VkDeviceSize maxCpuBytesToMove
    Maximum total numbers of bytes that can be copied while moving allocations to different places using ...
    Definition: vk_mem_alloc.h:2893
    +
    VmaPool pool
    Pool that this allocation should be created in.
    Definition: vk_mem_alloc.h:2220
    void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
    -
    const VkDeviceSize * pHeapSizeLimit
    Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
    Definition: vk_mem_alloc.h:1895
    -
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
    Definition: vk_mem_alloc.h:1986
    -
    Set this flag to use a memory that will be persistently mapped and retrieve pointer to it.
    Definition: vk_mem_alloc.h:2110
    -
    VkDeviceSize allocationSizeMin
    Definition: vk_mem_alloc.h:1979
    +
    const VkDeviceSize * pHeapSizeLimit
    Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
    Definition: vk_mem_alloc.h:1896
    +
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
    Definition: vk_mem_alloc.h:1987
    +
    Set this flag to use a memory that will be persistently mapped and retrieve pointer to it.
    Definition: vk_mem_alloc.h:2111
    +
    VkDeviceSize allocationSizeMin
    Definition: vk_mem_alloc.h:1980
    VkResult vmaFindMemoryTypeIndexForBufferInfo(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
    Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
    -
    PFN_vkCreateImage vkCreateImage
    Definition: vk_mem_alloc.h:1800
    -
    VmaRecordFlags flags
    Flags for recording. Use VmaRecordFlagBits enum.
    Definition: vk_mem_alloc.h:1826
    -
    VmaDefragmentationFlagBits
    Flags to be used in vmaDefragmentationBegin(). None at the moment. Reserved for future use.
    Definition: vk_mem_alloc.h:2838
    -
    VkCommandBuffer commandBuffer
    Optional. Command buffer where GPU copy commands will be posted.
    Definition: vk_mem_alloc.h:2916
    -
    PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
    Optional, can be null.
    Definition: vk_mem_alloc.h:1742
    -
    PFN_vkDestroyBuffer vkDestroyBuffer
    Definition: vk_mem_alloc.h:1799
    +
    PFN_vkCreateImage vkCreateImage
    Definition: vk_mem_alloc.h:1801
    +
    VmaRecordFlags flags
    Flags for recording. Use VmaRecordFlagBits enum.
    Definition: vk_mem_alloc.h:1827
    +
    VmaDefragmentationFlagBits
    Flags to be used in vmaDefragmentationBegin(). None at the moment. Reserved for future use.
    Definition: vk_mem_alloc.h:2839
    +
    VkCommandBuffer commandBuffer
    Optional. Command buffer where GPU copy commands will be posted.
    Definition: vk_mem_alloc.h:2917
    +
    PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
    Optional, can be null.
    Definition: vk_mem_alloc.h:1743
    +
    PFN_vkDestroyBuffer vkDestroyBuffer
    Definition: vk_mem_alloc.h:1800
    VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
    Maps memory represented by given allocation and returns pointer to it.
    -
    uint32_t frameInUseCount
    Maximum number of additional frames that are in use at the same time as current frame.
    Definition: vk_mem_alloc.h:2390
    -
    PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges
    Definition: vk_mem_alloc.h:1792
    -
    Definition: vk_mem_alloc.h:2141
    +
    uint32_t frameInUseCount
    Maximum number of additional frames that are in use at the same time as current frame.
    Definition: vk_mem_alloc.h:2391
    +
    PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges
    Definition: vk_mem_alloc.h:1793
    +
    Definition: vk_mem_alloc.h:2142
    VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    Function similar to vmaAllocateMemoryForBuffer().
    struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
    Description of a Allocator to be created.
    -
    const char * pFilePath
    Path to the file that should be written by the recording.
    Definition: vk_mem_alloc.h:1834
    -
    void * pUserData
    Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
    Definition: vk_mem_alloc.h:2541
    -
    VkDeviceSize preferredLargeHeapBlockSize
    Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB....
    Definition: vk_mem_alloc.h:1850
    -
    VkDeviceSize allocationSizeAvg
    Definition: vk_mem_alloc.h:1979
    +
    const char * pFilePath
    Path to the file that should be written by the recording.
    Definition: vk_mem_alloc.h:1835
    +
    void * pUserData
    Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
    Definition: vk_mem_alloc.h:2542
    +
    VkDeviceSize preferredLargeHeapBlockSize
    Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB....
    Definition: vk_mem_alloc.h:1851
    +
    VkDeviceSize allocationSizeAvg
    Definition: vk_mem_alloc.h:1980
    VkResult vmaAllocateMemoryPages(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, size_t allocationCount, VmaAllocation *pAllocations, VmaAllocationInfo *pAllocationInfo)
    General purpose memory allocation for multiple allocation objects at once.
    -
    VkDeviceSize usedBytes
    Total number of bytes occupied by all allocations.
    Definition: vk_mem_alloc.h:1976
    +
    VkDeviceSize usedBytes
    Total number of bytes occupied by all allocations.
    Definition: vk_mem_alloc.h:1977
    struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
    Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
    VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
    Checks magic number in margins around all allocations in given memory types (in both default and cust...
    -
    Describes parameter of existing VmaPool.
    Definition: vk_mem_alloc.h:2395
    -
    Parameters for defragmentation.
    Definition: vk_mem_alloc.h:2847
    +
    Describes parameter of existing VmaPool.
    Definition: vk_mem_alloc.h:2396
    +
    Parameters for defragmentation.
    Definition: vk_mem_alloc.h:2848
    VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
    Checks magic number in margins around all allocations in given memory pool in search for corruptions.
    -
    Definition: vk_mem_alloc.h:2156
    -
    VkDeviceSize offset
    Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory,...
    Definition: vk_mem_alloc.h:2522
    -
    Definition: vk_mem_alloc.h:2170
    -
    Definition: vk_mem_alloc.h:2182
    -
    VkDeviceSize bytesMoved
    Total number of bytes that have been copied while moving allocations to different places.
    Definition: vk_mem_alloc.h:2939
    -
    Pointers to some Vulkan functions - a subset used by the library.
    Definition: vk_mem_alloc.h:1785
    +
    Definition: vk_mem_alloc.h:2157
    +
    VkDeviceSize offset
    Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory,...
    Definition: vk_mem_alloc.h:2523
    +
    Definition: vk_mem_alloc.h:2171
    +
    Definition: vk_mem_alloc.h:2183
    +
    VkDeviceSize bytesMoved
    Total number of bytes that have been copied while moving allocations to different places.
    Definition: vk_mem_alloc.h:2940
    +
    Pointers to some Vulkan functions - a subset used by the library.
    Definition: vk_mem_alloc.h:1786
    VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
    Creates Allocator object.
    -
    uint32_t unusedRangeCount
    Number of free ranges of memory between allocations.
    Definition: vk_mem_alloc.h:1974
    -
    Definition: vk_mem_alloc.h:2031
    -
    VkFlags VmaPoolCreateFlags
    Definition: vk_mem_alloc.h:2344
    +
    uint32_t unusedRangeCount
    Number of free ranges of memory between allocations.
    Definition: vk_mem_alloc.h:1975
    +
    Definition: vk_mem_alloc.h:2032
    +
    VkFlags VmaPoolCreateFlags
    Definition: vk_mem_alloc.h:2345
    void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
    -
    Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
    Definition: vk_mem_alloc.h:1823
    -
    uint32_t allocationCount
    Number of VmaAllocation allocation objects allocated.
    Definition: vk_mem_alloc.h:1972
    -
    PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
    Definition: vk_mem_alloc.h:1797
    -
    PFN_vkDestroyImage vkDestroyImage
    Definition: vk_mem_alloc.h:1801
    -
    Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
    Definition: vk_mem_alloc.h:2097
    -
    Definition: vk_mem_alloc.h:2177
    -
    Definition: vk_mem_alloc.h:2058
    -
    void * pMappedData
    Pointer to the beginning of this allocation as mapped data.
    Definition: vk_mem_alloc.h:2536
    +
    Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
    Definition: vk_mem_alloc.h:1824
    +
    uint32_t allocationCount
    Number of VmaAllocation allocation objects allocated.
    Definition: vk_mem_alloc.h:1973
    +
    PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
    Definition: vk_mem_alloc.h:1798
    +
    PFN_vkDestroyImage vkDestroyImage
    Definition: vk_mem_alloc.h:1802
    +
    Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
    Definition: vk_mem_alloc.h:2098
    +
    Definition: vk_mem_alloc.h:2178
    +
    Definition: vk_mem_alloc.h:2059
    +
    void * pMappedData
    Pointer to the beginning of this allocation as mapped data.
    Definition: vk_mem_alloc.h:2537
    void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
    Destroys Vulkan image and frees allocated memory.
    -
    Enables usage of VK_KHR_dedicated_allocation extension.
    Definition: vk_mem_alloc.h:1775
    +
    Enables usage of VK_KHR_dedicated_allocation extension.
    Definition: vk_mem_alloc.h:1776
    struct VmaDefragmentationStats VmaDefragmentationStats
    Statistics returned by function vmaDefragment().
    -
    PFN_vkAllocateMemory vkAllocateMemory
    Definition: vk_mem_alloc.h:1788
    -
    Enables alternative, linear allocation algorithm in this pool.
    Definition: vk_mem_alloc.h:2323
    +
    PFN_vkAllocateMemory vkAllocateMemory
    Definition: vk_mem_alloc.h:1789
    +
    Enables alternative, linear allocation algorithm in this pool.
    Definition: vk_mem_alloc.h:2324
    VkResult vmaResizeAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize newSize)
    Tries to resize an allocation in place, if there is enough free memory after it.
    -
    Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
    Definition: vk_mem_alloc.h:2503
    +
    Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
    Definition: vk_mem_alloc.h:2504
    VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    General purpose memory allocation.
    void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
    Sets index of the current frame.
    struct VmaAllocationCreateInfo VmaAllocationCreateInfo
    VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    -
    Definition: vk_mem_alloc.h:2167
    -
    VmaPoolCreateFlagBits
    Flags to be passed as VmaPoolCreateInfo::flags.
    Definition: vk_mem_alloc.h:2288
    -
    VkDeviceSize unusedRangeSizeAvg
    Definition: vk_mem_alloc.h:1980
    +
    Definition: vk_mem_alloc.h:2168
    +
    VmaPoolCreateFlagBits
    Flags to be passed as VmaPoolCreateInfo::flags.
    Definition: vk_mem_alloc.h:2289
    +
    VkDeviceSize unusedRangeSizeAvg
    Definition: vk_mem_alloc.h:1981
    VkBool32 vmaTouchAllocation(VmaAllocator allocator, VmaAllocation allocation)
    Returns VK_TRUE if allocation is not lost and atomically marks it as used in current frame.
    - -
    VmaRecordFlagBits
    Flags to be used in VmaRecordSettings::flags.
    Definition: vk_mem_alloc.h:1810
    -
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
    Definition: vk_mem_alloc.h:1987
    + +
    VmaRecordFlagBits
    Flags to be used in VmaRecordSettings::flags.
    Definition: vk_mem_alloc.h:1811
    +
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
    Definition: vk_mem_alloc.h:1988
    void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
    Destroys Vulkan buffer and frees allocated memory.
    -
    VkDeviceSize unusedSize
    Total number of bytes in the pool not used by any VmaAllocation.
    Definition: vk_mem_alloc.h:2401
    -
    VkDeviceSize unusedRangeSizeMax
    Definition: vk_mem_alloc.h:1980
    +
    VkDeviceSize unusedSize
    Total number of bytes in the pool not used by any VmaAllocation.
    Definition: vk_mem_alloc.h:2402
    +
    VkDeviceSize unusedRangeSizeMax
    Definition: vk_mem_alloc.h:1981
    struct VmaDefragmentationInfo2 VmaDefragmentationInfo2
    Parameters for defragmentation.
    -
    uint32_t maxGpuAllocationsToMove
    Maximum number of allocations that can be moved to a different place using transfers on GPU side,...
    Definition: vk_mem_alloc.h:2907
    +
    uint32_t maxGpuAllocationsToMove
    Maximum number of allocations that can be moved to a different place using transfers on GPU side,...
    Definition: vk_mem_alloc.h:2908
    struct VmaRecordSettings VmaRecordSettings
    Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
    -
    uint32_t memoryType
    Memory type index that this allocation was allocated from.
    Definition: vk_mem_alloc.h:2508
    -
    uint32_t poolCount
    Numer of pools in pPools array.
    Definition: vk_mem_alloc.h:2871
    +
    uint32_t memoryType
    Memory type index that this allocation was allocated from.
    Definition: vk_mem_alloc.h:2509
    +
    uint32_t poolCount
    Numer of pools in pPools array.
    Definition: vk_mem_alloc.h:2872