From 6c8b7a2c3eae52b0e388286be1de606a6b49c032 Mon Sep 17 00:00:00 2001
From: Adam Sawicki
Date: Tue, 5 Mar 2019 13:40:29 +0100
Subject: [PATCH] Added flag VMA_ALLOCATION_CREATE_DONT_BIND_BIT.

---
 docs/html/globals.html                  |   7 +-
 docs/html/globals_eval.html             |   3 +
 docs/html/index.html                    |   2 +-
 docs/html/search/all_10.js              |   1 +
 docs/html/search/enumvalues_0.js        |   1 +
 docs/html/vk__mem__alloc_8h.html        |  11 +-
 docs/html/vk__mem__alloc_8h_source.html | 169 ++++++++++++------------
 src/VulkanSample.cpp                    |   4 +-
 src/vk_mem_alloc.h                      |  18 ++-
 9 files changed, 120 insertions(+), 96 deletions(-)

diff --git a/docs/html/globals.html b/docs/html/globals.html
index 5d7c1be..d755ab2 100644
--- a/docs/html/globals.html
+++ b/docs/html/globals.html
@@ -90,6 +90,9 @@ $(function() {
  • VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT : vk_mem_alloc.h
+ • VMA_ALLOCATION_CREATE_DONT_BIND_BIT : vk_mem_alloc.h
  • VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM : vk_mem_alloc.h
  • @@ -262,7 +265,7 @@ $(function() { : vk_mem_alloc.h
  • VmaDefragmentationFlagBits -: vk_mem_alloc.h +: vk_mem_alloc.h
  • VmaDefragmentationFlags : vk_mem_alloc.h @@ -352,7 +355,7 @@ $(function() { : vk_mem_alloc.h
  • VmaRecordFlagBits -: vk_mem_alloc.h +: vk_mem_alloc.h
  • VmaRecordFlags : vk_mem_alloc.h
diff --git a/docs/html/globals_eval.html b/docs/html/globals_eval.html
index 1fe3ec8..49662c4 100644
--- a/docs/html/globals_eval.html
+++ b/docs/html/globals_eval.html
@@ -73,6 +73,9 @@ $(function() {
  • VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT : vk_mem_alloc.h
+ • VMA_ALLOCATION_CREATE_DONT_BIND_BIT : vk_mem_alloc.h
  • VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM : vk_mem_alloc.h
diff --git a/docs/html/index.html b/docs/html/index.html
index 87d6e33..4ffc5e4 100644
--- a/docs/html/index.html
+++ b/docs/html/index.html
@@ -65,7 +65,7 @@ $(function() {
    Vulkan Memory Allocator
-Version 2.2.1-development (2018-12-14)
+Version 2.3.0-development (2019-03-05)

    Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
    License: MIT

    Documentation of all members: vk_mem_alloc.h

diff --git a/docs/html/search/all_10.js b/docs/html/search/all_10.js
index 54a2afc..c8fa58c 100644
--- a/docs/html/search/all_10.js
+++ b/docs/html/search/all_10.js
@@ -23,6 +23,7 @@ var searchData=
 ['vma_5fallocation_5fcreate_5fcan_5fbecome_5flost_5fbit',['VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT',['../vk__mem__alloc_8h.html#ad9889c10c798b040d59c92f257cae597a5f436af6c8fe8540573a6d22627a6fd2',1,'vk_mem_alloc.h']]],
 ['vma_5fallocation_5fcreate_5fcan_5fmake_5fother_5flost_5fbit',['VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT',['../vk__mem__alloc_8h.html#ad9889c10c798b040d59c92f257cae597a68686d0ce9beb0d4d1b9f2b8b1389a7e',1,'vk_mem_alloc.h']]],
 ['vma_5fallocation_5fcreate_5fdedicated_5fmemory_5fbit',['VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT',['../vk__mem__alloc_8h.html#ad9889c10c798b040d59c92f257cae597a3fc311d855c2ff53f1090ef5c722b38f',1,'vk_mem_alloc.h']]],
+['vma_5fallocation_5fcreate_5fdont_5fbind_5fbit',['VMA_ALLOCATION_CREATE_DONT_BIND_BIT',['../vk__mem__alloc_8h.html#ad9889c10c798b040d59c92f257cae597a2310568c62208af432724305fe29ccea',1,'vk_mem_alloc.h']]],
 ['vma_5fallocation_5fcreate_5fflag_5fbits_5fmax_5fenum',['VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM',['../vk__mem__alloc_8h.html#ad9889c10c798b040d59c92f257cae597ae5633ec569f4899cf8f29e7385b2f882',1,'vk_mem_alloc.h']]],
 ['vma_5fallocation_5fcreate_5fmapped_5fbit',['VMA_ALLOCATION_CREATE_MAPPED_BIT',['../vk__mem__alloc_8h.html#ad9889c10c798b040d59c92f257cae597a11da372cc3a82931c5e5d6146cd9dd1f',1,'vk_mem_alloc.h']]],
 ['vma_5fallocation_5fcreate_5fnever_5fallocate_5fbit',['VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT',['../vk__mem__alloc_8h.html#ad9889c10c798b040d59c92f257cae597a89759603401014eb325eb22a3839f2ff',1,'vk_mem_alloc.h']]],
diff --git a/docs/html/search/enumvalues_0.js b/docs/html/search/enumvalues_0.js
index cdf4ba2..6353a4c 100644
--- a/docs/html/search/enumvalues_0.js
+++ b/docs/html/search/enumvalues_0.js
@@ -3,6 +3,7 @@ var searchData=
 ['vma_5fallocation_5fcreate_5fcan_5fbecome_5flost_5fbit',['VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT',['../vk__mem__alloc_8h.html#ad9889c10c798b040d59c92f257cae597a5f436af6c8fe8540573a6d22627a6fd2',1,'vk_mem_alloc.h']]],
 ['vma_5fallocation_5fcreate_5fcan_5fmake_5fother_5flost_5fbit',['VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT',['../vk__mem__alloc_8h.html#ad9889c10c798b040d59c92f257cae597a68686d0ce9beb0d4d1b9f2b8b1389a7e',1,'vk_mem_alloc.h']]],
 ['vma_5fallocation_5fcreate_5fdedicated_5fmemory_5fbit',['VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT',['../vk__mem__alloc_8h.html#ad9889c10c798b040d59c92f257cae597a3fc311d855c2ff53f1090ef5c722b38f',1,'vk_mem_alloc.h']]],
+['vma_5fallocation_5fcreate_5fdont_5fbind_5fbit',['VMA_ALLOCATION_CREATE_DONT_BIND_BIT',['../vk__mem__alloc_8h.html#ad9889c10c798b040d59c92f257cae597a2310568c62208af432724305fe29ccea',1,'vk_mem_alloc.h']]],
 ['vma_5fallocation_5fcreate_5fflag_5fbits_5fmax_5fenum',['VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM',['../vk__mem__alloc_8h.html#ad9889c10c798b040d59c92f257cae597ae5633ec569f4899cf8f29e7385b2f882',1,'vk_mem_alloc.h']]],
 ['vma_5fallocation_5fcreate_5fmapped_5fbit',['VMA_ALLOCATION_CREATE_MAPPED_BIT',['../vk__mem__alloc_8h.html#ad9889c10c798b040d59c92f257cae597a11da372cc3a82931c5e5d6146cd9dd1f',1,'vk_mem_alloc.h']]],
 ['vma_5fallocation_5fcreate_5fnever_5fallocate_5fbit',['VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT',['../vk__mem__alloc_8h.html#ad9889c10c798b040d59c92f257cae597a89759603401014eb325eb22a3839f2ff',1,'vk_mem_alloc.h']]],
diff --git a/docs/html/vk__mem__alloc_8h.html b/docs/html/vk__mem__alloc_8h.html
index ac3d7e4..0a89157 100644
--- a/docs/html/vk__mem__alloc_8h.html
+++ b/docs/html/vk__mem__alloc_8h.html
@@ -234,14 +234,15 @@ Enumerations
   VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT = 0x00000010,
   VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT = 0x00000020,
   VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT = 0x00000040,
-  VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT = 0x00010000,
+  VMA_ALLOCATION_CREATE_DONT_BIND_BIT = 0x00000080,
-  VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT = 0x00020000,
+  VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT = 0x00010000,
+  VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT = 0x00020000,
   VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT = 0x00040000,
   VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT,
-  VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT = VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT,
-  VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT = VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT,
+  VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT = VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT,
+  VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT = VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT,
   VMA_ALLOCATION_CREATE_STRATEGY_MASK,
   VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
@@ -895,6 +896,8 @@ Functions
 VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT

    Allocation will be created from the upper stack in a double stack pool.

    This flag is only allowed for custom pools created with VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT flag.

    +VMA_ALLOCATION_CREATE_DONT_BIND_BIT 

    Create both buffer/image and allocation, but don't bind them together. It is useful when you want to perform the binding yourself, e.g. to use some extension for more advanced binding. The flag is meaningful only with functions that bind by default: vmaCreateBuffer(), vmaCreateImage(). Otherwise it is ignored.

    + VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT 

    Allocation strategy that chooses smallest possible free range for the allocation.

    VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT 

    Allocation strategy that chooses biggest possible free range for the allocation.

diff --git a/docs/html/vk__mem__alloc_8h_source.html b/docs/html/vk__mem__alloc_8h_source.html
index 83517c6..e3c14a2 100644
--- a/docs/html/vk__mem__alloc_8h_source.html
+++ b/docs/html/vk__mem__alloc_8h_source.html
@@ -65,87 +65,88 @@ $(function() {
    vk_mem_alloc.h
    -Go to the documentation of this file.
    1 //
    2 // Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
    3 //
    4 // Permission is hereby granted, free of charge, to any person obtaining a copy
    5 // of this software and associated documentation files (the "Software"), to deal
    6 // in the Software without restriction, including without limitation the rights
    7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    8 // copies of the Software, and to permit persons to whom the Software is
    9 // furnished to do so, subject to the following conditions:
    10 //
    11 // The above copyright notice and this permission notice shall be included in
    12 // all copies or substantial portions of the Software.
    13 //
    14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    20 // THE SOFTWARE.
    21 //
    22 
    23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
    24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
    25 
    26 #ifdef __cplusplus
    27 extern "C" {
    28 #endif
    29 
    1644 /*
    1645 Define this macro to 0/1 to disable/enable support for recording functionality,
    1646 available through VmaAllocatorCreateInfo::pRecordSettings.
    1647 */
    1648 #ifndef VMA_RECORDING_ENABLED
    1649  #ifdef _WIN32
    1650  #define VMA_RECORDING_ENABLED 1
    1651  #else
    1652  #define VMA_RECORDING_ENABLED 0
    1653  #endif
    1654 #endif
    1655 
    1656 #ifndef NOMINMAX
    1657  #define NOMINMAX // For windows.h
    1658 #endif
    1659 
    1660 #ifndef VULKAN_H_
    1661  #include <vulkan/vulkan.h>
    1662 #endif
    1663 
    1664 #if VMA_RECORDING_ENABLED
    1665  #include <windows.h>
    1666 #endif
    1667 
    1668 #if !defined(VMA_DEDICATED_ALLOCATION)
    1669  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
    1670  #define VMA_DEDICATED_ALLOCATION 1
    1671  #else
    1672  #define VMA_DEDICATED_ALLOCATION 0
    1673  #endif
    1674 #endif
    1675 
    1685 VK_DEFINE_HANDLE(VmaAllocator)
    1686 
    1687 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    1689  VmaAllocator allocator,
    1690  uint32_t memoryType,
    1691  VkDeviceMemory memory,
    1692  VkDeviceSize size);
    1694 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    1695  VmaAllocator allocator,
    1696  uint32_t memoryType,
    1697  VkDeviceMemory memory,
    1698  VkDeviceSize size);
    1699 
    1713 
    1743 
    1746 typedef VkFlags VmaAllocatorCreateFlags;
    1747 
    1752 typedef struct VmaVulkanFunctions {
    1753  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    1754  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    1755  PFN_vkAllocateMemory vkAllocateMemory;
    1756  PFN_vkFreeMemory vkFreeMemory;
    1757  PFN_vkMapMemory vkMapMemory;
    1758  PFN_vkUnmapMemory vkUnmapMemory;
    1759  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
    1760  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
    1761  PFN_vkBindBufferMemory vkBindBufferMemory;
    1762  PFN_vkBindImageMemory vkBindImageMemory;
    1763  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    1764  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    1765  PFN_vkCreateBuffer vkCreateBuffer;
    1766  PFN_vkDestroyBuffer vkDestroyBuffer;
    1767  PFN_vkCreateImage vkCreateImage;
    1768  PFN_vkDestroyImage vkDestroyImage;
    1769  PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
    1770 #if VMA_DEDICATED_ALLOCATION
    1771  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    1772  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
    1773 #endif
    1775 
    1777 typedef enum VmaRecordFlagBits {
    1784 
    1787 typedef VkFlags VmaRecordFlags;
    1788 
    1790 typedef struct VmaRecordSettings
    1791 {
    1801  const char* pFilePath;
    1803 
    1805 typedef struct VmaAllocatorCreateInfo
    1806 {
    1810 
    1811  VkPhysicalDevice physicalDevice;
    1813 
    1814  VkDevice device;
    1816 
    1819 
    1820  const VkAllocationCallbacks* pAllocationCallbacks;
    1822 
    1862  const VkDeviceSize* pHeapSizeLimit;
    1883 
    1885 VkResult vmaCreateAllocator(
    1886  const VmaAllocatorCreateInfo* pCreateInfo,
    1887  VmaAllocator* pAllocator);
    1888 
    1890 void vmaDestroyAllocator(
    1891  VmaAllocator allocator);
    1892 
    1898  VmaAllocator allocator,
    1899  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
    1900 
    1906  VmaAllocator allocator,
    1907  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
    1908 
    1916  VmaAllocator allocator,
    1917  uint32_t memoryTypeIndex,
    1918  VkMemoryPropertyFlags* pFlags);
    1919 
    1929  VmaAllocator allocator,
    1930  uint32_t frameIndex);
    1931 
    1934 typedef struct VmaStatInfo
    1935 {
    1937  uint32_t blockCount;
    1943  VkDeviceSize usedBytes;
    1945  VkDeviceSize unusedBytes;
    1948 } VmaStatInfo;
    1949 
    1951 typedef struct VmaStats
    1952 {
    1953  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    1954  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    1956 } VmaStats;
    1957 
    1959 void vmaCalculateStats(
    1960  VmaAllocator allocator,
    1961  VmaStats* pStats);
    1962 
    1963 #define VMA_STATS_STRING_ENABLED 1
    1964 
    1965 #if VMA_STATS_STRING_ENABLED
    1966 
    1968 
    1970 void vmaBuildStatsString(
    1971  VmaAllocator allocator,
    1972  char** ppStatsString,
    1973  VkBool32 detailedMap);
    1974 
    1975 void vmaFreeStatsString(
    1976  VmaAllocator allocator,
    1977  char* pStatsString);
    1978 
    1979 #endif // #if VMA_STATS_STRING_ENABLED
    1980 
    1989 VK_DEFINE_HANDLE(VmaPool)
    1990 
    1991 typedef enum VmaMemoryUsage
    1992 {
    2041 } VmaMemoryUsage;
    2042 
    2052 
    2107 
    2123 
    2133 
    2140 
    2144 
    2145 typedef struct VmaAllocationCreateInfo
    2146 {
    2159  VkMemoryPropertyFlags requiredFlags;
    2164  VkMemoryPropertyFlags preferredFlags;
    2172  uint32_t memoryTypeBits;
    2185  void* pUserData;
    2187 
    2204 VkResult vmaFindMemoryTypeIndex(
    2205  VmaAllocator allocator,
    2206  uint32_t memoryTypeBits,
    2207  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2208  uint32_t* pMemoryTypeIndex);
    2209 
    2223  VmaAllocator allocator,
    2224  const VkBufferCreateInfo* pBufferCreateInfo,
    2225  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2226  uint32_t* pMemoryTypeIndex);
    2227 
    2241  VmaAllocator allocator,
    2242  const VkImageCreateInfo* pImageCreateInfo,
    2243  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2244  uint32_t* pMemoryTypeIndex);
    2245 
    2266 
    2283 
    2294 
    2300 
    2303 typedef VkFlags VmaPoolCreateFlags;
    2304 
    2307 typedef struct VmaPoolCreateInfo {
    2322  VkDeviceSize blockSize;
    2351 
    2354 typedef struct VmaPoolStats {
    2357  VkDeviceSize size;
    2360  VkDeviceSize unusedSize;
    2373  VkDeviceSize unusedRangeSizeMax;
    2376  size_t blockCount;
    2377 } VmaPoolStats;
    2378 
    2385 VkResult vmaCreatePool(
    2386  VmaAllocator allocator,
    2387  const VmaPoolCreateInfo* pCreateInfo,
    2388  VmaPool* pPool);
    2389 
    2392 void vmaDestroyPool(
    2393  VmaAllocator allocator,
    2394  VmaPool pool);
    2395 
    2402 void vmaGetPoolStats(
    2403  VmaAllocator allocator,
    2404  VmaPool pool,
    2405  VmaPoolStats* pPoolStats);
    2406 
    2414  VmaAllocator allocator,
    2415  VmaPool pool,
    2416  size_t* pLostAllocationCount);
    2417 
    2432 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
    2433 
    2458 VK_DEFINE_HANDLE(VmaAllocation)
    2459 
    2460 
    2462 typedef struct VmaAllocationInfo {
    2467  uint32_t memoryType;
    2476  VkDeviceMemory deviceMemory;
    2481  VkDeviceSize offset;
    2486  VkDeviceSize size;
    2500  void* pUserData;
    2502 
    2513 VkResult vmaAllocateMemory(
    2514  VmaAllocator allocator,
    2515  const VkMemoryRequirements* pVkMemoryRequirements,
    2516  const VmaAllocationCreateInfo* pCreateInfo,
    2517  VmaAllocation* pAllocation,
    2518  VmaAllocationInfo* pAllocationInfo);
    2519 
    2539 VkResult vmaAllocateMemoryPages(
    2540  VmaAllocator allocator,
    2541  const VkMemoryRequirements* pVkMemoryRequirements,
    2542  const VmaAllocationCreateInfo* pCreateInfo,
    2543  size_t allocationCount,
    2544  VmaAllocation* pAllocations,
    2545  VmaAllocationInfo* pAllocationInfo);
    2546 
    2554  VmaAllocator allocator,
    2555  VkBuffer buffer,
    2556  const VmaAllocationCreateInfo* pCreateInfo,
    2557  VmaAllocation* pAllocation,
    2558  VmaAllocationInfo* pAllocationInfo);
    2559 
    2561 VkResult vmaAllocateMemoryForImage(
    2562  VmaAllocator allocator,
    2563  VkImage image,
    2564  const VmaAllocationCreateInfo* pCreateInfo,
    2565  VmaAllocation* pAllocation,
    2566  VmaAllocationInfo* pAllocationInfo);
    2567 
    2572 void vmaFreeMemory(
    2573  VmaAllocator allocator,
    2574  VmaAllocation allocation);
    2575 
    2586 void vmaFreeMemoryPages(
    2587  VmaAllocator allocator,
    2588  size_t allocationCount,
    2589  VmaAllocation* pAllocations);
    2590 
    2611 VkResult vmaResizeAllocation(
    2612  VmaAllocator allocator,
    2613  VmaAllocation allocation,
    2614  VkDeviceSize newSize);
    2615 
    2633  VmaAllocator allocator,
    2634  VmaAllocation allocation,
    2635  VmaAllocationInfo* pAllocationInfo);
    2636 
    2651 VkBool32 vmaTouchAllocation(
    2652  VmaAllocator allocator,
    2653  VmaAllocation allocation);
    2654 
    2669  VmaAllocator allocator,
    2670  VmaAllocation allocation,
    2671  void* pUserData);
    2672 
    2684  VmaAllocator allocator,
    2685  VmaAllocation* pAllocation);
    2686 
    2721 VkResult vmaMapMemory(
    2722  VmaAllocator allocator,
    2723  VmaAllocation allocation,
    2724  void** ppData);
    2725 
    2730 void vmaUnmapMemory(
    2731  VmaAllocator allocator,
    2732  VmaAllocation allocation);
    2733 
    2746 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2747 
    2760 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2761 
    2778 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
    2779 
    2786 VK_DEFINE_HANDLE(VmaDefragmentationContext)
    2787 
    2788 typedef enum VmaDefragmentationFlagBits {
    2792 typedef VkFlags VmaDefragmentationFlags;
    2793 
    2798 typedef struct VmaDefragmentationInfo2 {
    2822  uint32_t poolCount;
    2843  VkDeviceSize maxCpuBytesToMove;
    2853  VkDeviceSize maxGpuBytesToMove;
    2867  VkCommandBuffer commandBuffer;
    2869 
    2874 typedef struct VmaDefragmentationInfo {
    2879  VkDeviceSize maxBytesToMove;
    2886 
    2888 typedef struct VmaDefragmentationStats {
    2890  VkDeviceSize bytesMoved;
    2892  VkDeviceSize bytesFreed;
    2898 
    2925 VkResult vmaDefragmentationBegin(
    2926  VmaAllocator allocator,
    2927  const VmaDefragmentationInfo2* pInfo,
    2928  VmaDefragmentationStats* pStats,
    2929  VmaDefragmentationContext *pContext);
    2930 
    2936 VkResult vmaDefragmentationEnd(
    2937  VmaAllocator allocator,
    2938  VmaDefragmentationContext context);
    2939 
    2980 VkResult vmaDefragment(
    2981  VmaAllocator allocator,
    2982  VmaAllocation* pAllocations,
    2983  size_t allocationCount,
    2984  VkBool32* pAllocationsChanged,
    2985  const VmaDefragmentationInfo *pDefragmentationInfo,
    2986  VmaDefragmentationStats* pDefragmentationStats);
    2987 
    3000 VkResult vmaBindBufferMemory(
    3001  VmaAllocator allocator,
    3002  VmaAllocation allocation,
    3003  VkBuffer buffer);
    3004 
    3017 VkResult vmaBindImageMemory(
    3018  VmaAllocator allocator,
    3019  VmaAllocation allocation,
    3020  VkImage image);
    3021 
    3048 VkResult vmaCreateBuffer(
    3049  VmaAllocator allocator,
    3050  const VkBufferCreateInfo* pBufferCreateInfo,
    3051  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    3052  VkBuffer* pBuffer,
    3053  VmaAllocation* pAllocation,
    3054  VmaAllocationInfo* pAllocationInfo);
    3055 
    3067 void vmaDestroyBuffer(
    3068  VmaAllocator allocator,
    3069  VkBuffer buffer,
    3070  VmaAllocation allocation);
    3071 
    3073 VkResult vmaCreateImage(
    3074  VmaAllocator allocator,
    3075  const VkImageCreateInfo* pImageCreateInfo,
    3076  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    3077  VkImage* pImage,
    3078  VmaAllocation* pAllocation,
    3079  VmaAllocationInfo* pAllocationInfo);
    3080 
    3092 void vmaDestroyImage(
    3093  VmaAllocator allocator,
    3094  VkImage image,
    3095  VmaAllocation allocation);
    3096 
    3097 #ifdef __cplusplus
    3098 }
    3099 #endif
    3100 
    3101 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
    3102 
    3103 // For Visual Studio IntelliSense.
    3104 #if defined(__cplusplus) && defined(__INTELLISENSE__)
    3105 #define VMA_IMPLEMENTATION
    3106 #endif
    3107 
    3108 #ifdef VMA_IMPLEMENTATION
    3109 #undef VMA_IMPLEMENTATION
    3110 
    3111 #include <cstdint>
    3112 #include <cstdlib>
    3113 #include <cstring>
    3114 
    3115 /*******************************************************************************
    3116 CONFIGURATION SECTION
    3117 
    3118 Define some of these macros before each #include of this header or change them
    3119 here if you need other than default behavior depending on your environment.
    3120 */
    3121 
    3122 /*
    3123 Define this macro to 1 to make the library fetch pointers to Vulkan functions
    3124 internally, like:
    3125 
    3126  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    3127 
    3128 Define to 0 if you are going to provide your own pointers to Vulkan functions via
    3129 VmaAllocatorCreateInfo::pVulkanFunctions.
    3130 */
    3131 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
    3132 #define VMA_STATIC_VULKAN_FUNCTIONS 1
    3133 #endif
    3134 
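// Illustrative sketch (not part of the original header): with VMA_STATIC_VULKAN_FUNCTIONS
// defined to 0, fill VmaVulkanFunctions yourself and pass it at allocator creation.
// `myGetDeviceProc`, `physicalDevice`, `device` and `allocator` are hypothetical.
//
//   VmaVulkanFunctions funcs = {};
//   funcs.vkAllocateMemory = (PFN_vkAllocateMemory)myGetDeviceProc("vkAllocateMemory");
//   funcs.vkFreeMemory = (PFN_vkFreeMemory)myGetDeviceProc("vkFreeMemory");
//   // ...fill all remaining members of VmaVulkanFunctions the same way...
//
//   VmaAllocatorCreateInfo allocatorInfo = {};
//   allocatorInfo.physicalDevice = physicalDevice;
//   allocatorInfo.device = device;
//   allocatorInfo.pVulkanFunctions = &funcs;
//   vmaCreateAllocator(&allocatorInfo, &allocator);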
    3135 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
    3136 //#define VMA_USE_STL_CONTAINERS 1
    3137 
    3138 /* Set this macro to 1 to make the library include and use STL containers:
    3139 std::pair, std::vector, std::list, std::unordered_map.
    3140 
    3141 Set it to 0 or leave it undefined to make the library use its own implementation of
    3142 the containers.
    3143 */
    3144 #if VMA_USE_STL_CONTAINERS
    3145  #define VMA_USE_STL_VECTOR 1
    3146  #define VMA_USE_STL_UNORDERED_MAP 1
    3147  #define VMA_USE_STL_LIST 1
    3148 #endif
    3149 
    3150 #ifndef VMA_USE_STL_SHARED_MUTEX
    3151  // Compiler conforms to C++17.
    3152  #if __cplusplus >= 201703L
    3153  #define VMA_USE_STL_SHARED_MUTEX 1
    3154  // Visual Studio defines __cplusplus properly only when passed the additional parameter: /Zc:__cplusplus
    3155  // Otherwise it's always 199711L, even though shared_mutex has worked since Visual Studio 2015 Update 2.
    3156  // See: https://blogs.msdn.microsoft.com/vcblog/2018/04/09/msvc-now-correctly-reports-__cplusplus/
    3157  #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
    3158  #define VMA_USE_STL_SHARED_MUTEX 1
    3159  #else
    3160  #define VMA_USE_STL_SHARED_MUTEX 0
    3161  #endif
    3162 #endif
    3163 
    3164 #if VMA_USE_STL_VECTOR
    3165  #include <vector>
    3166 #endif
    3167 
    3168 #if VMA_USE_STL_UNORDERED_MAP
    3169  #include <unordered_map>
    3170 #endif
    3171 
    3172 #if VMA_USE_STL_LIST
    3173  #include <list>
    3174 #endif
    3175 
    3176 /*
    3177 The following headers are used in this CONFIGURATION section only, so feel free to
    3178 remove them if not needed.
    3179 */
    3180 #include <cassert> // for assert
    3181 #include <algorithm> // for min, max
    3182 #include <mutex>
    3183 #include <atomic> // for std::atomic
    3184 
    3185 #ifndef VMA_NULL
    3186  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    3187  #define VMA_NULL nullptr
    3188 #endif
    3189 
    3190 #if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
    3191 #include <cstdlib>
    3192 void *aligned_alloc(size_t alignment, size_t size)
    3193 {
    3194  // alignment must be >= sizeof(void*)
    3195  if(alignment < sizeof(void*))
    3196  {
    3197  alignment = sizeof(void*);
    3198  }
    3199 
    3200  return memalign(alignment, size);
    3201 }
    3202 #elif defined(__APPLE__) || defined(__ANDROID__)
    3203 #include <cstdlib>
    3204 void *aligned_alloc(size_t alignment, size_t size)
    3205 {
    3206  // alignment must be >= sizeof(void*)
    3207  if(alignment < sizeof(void*))
    3208  {
    3209  alignment = sizeof(void*);
    3210  }
    3211 
    3212  void *pointer;
    3213  if(posix_memalign(&pointer, alignment, size) == 0)
    3214  return pointer;
    3215  return VMA_NULL;
    3216 }
    3217 #endif
    3218 
    3219 // If your compiler is not compatible with C++11 and the definition of the
    3220 // aligned_alloc() function is missing, uncommenting the following line may help:
    3221 
    3222 //#include <malloc.h>
    3223 
    3224 // Normal assert to check for programmer's errors, especially in Debug configuration.
    3225 #ifndef VMA_ASSERT
    3226  #ifdef _DEBUG
    3227  #define VMA_ASSERT(expr) assert(expr)
    3228  #else
    3229  #define VMA_ASSERT(expr)
    3230  #endif
    3231 #endif
    3232 
    3233 // Assert that will be called very often, like inside data structures e.g. operator[].
    3234 // Making it non-empty can make the program slow.
    3235 #ifndef VMA_HEAVY_ASSERT
    3236  #ifdef _DEBUG
    3237  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    3238  #else
    3239  #define VMA_HEAVY_ASSERT(expr)
    3240  #endif
    3241 #endif
    3242 
    3243 #ifndef VMA_ALIGN_OF
    3244  #define VMA_ALIGN_OF(type) (__alignof(type))
    3245 #endif
    3246 
    3247 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
    3248  #if defined(_WIN32)
    3249  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    3250  #else
    3251  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
    3252  #endif
    3253 #endif
    3254 
    3255 #ifndef VMA_SYSTEM_FREE
    3256  #if defined(_WIN32)
    3257  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    3258  #else
    3259  #define VMA_SYSTEM_FREE(ptr) free(ptr)
    3260  #endif
    3261 #endif
    3262 
    3263 #ifndef VMA_MIN
    3264  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
    3265 #endif
    3266 
    3267 #ifndef VMA_MAX
    3268  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
    3269 #endif
    3270 
    3271 #ifndef VMA_SWAP
    3272  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
    3273 #endif
    3274 
    3275 #ifndef VMA_SORT
    3276  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
    3277 #endif
    3278 
    3279 #ifndef VMA_DEBUG_LOG
    3280  #define VMA_DEBUG_LOG(format, ...)
    3281  /*
    3282  #define VMA_DEBUG_LOG(format, ...) do { \
    3283  printf(format, __VA_ARGS__); \
    3284  printf("\n"); \
    3285  } while(false)
    3286  */
    3287 #endif
    3288 
    3289 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
    3290 #if VMA_STATS_STRING_ENABLED
    3291  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    3292  {
    3293  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    3294  }
    3295  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    3296  {
    3297  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    3298  }
    3299  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    3300  {
    3301  snprintf(outStr, strLen, "%p", ptr);
    3302  }
    3303 #endif
    3304 
    3305 #ifndef VMA_MUTEX
    3306  class VmaMutex
    3307  {
    3308  public:
    3309  void Lock() { m_Mutex.lock(); }
    3310  void Unlock() { m_Mutex.unlock(); }
    3311  private:
    3312  std::mutex m_Mutex;
    3313  };
    3314  #define VMA_MUTEX VmaMutex
    3315 #endif
    3316 
    3317 // Read-write mutex, where "read" is shared access, "write" is exclusive access.
    3318 #ifndef VMA_RW_MUTEX
    3319  #if VMA_USE_STL_SHARED_MUTEX
    3320  // Use std::shared_mutex from C++17.
    3321  #include <shared_mutex>
    3322  class VmaRWMutex
    3323  {
    3324  public:
    3325  void LockRead() { m_Mutex.lock_shared(); }
    3326  void UnlockRead() { m_Mutex.unlock_shared(); }
    3327  void LockWrite() { m_Mutex.lock(); }
    3328  void UnlockWrite() { m_Mutex.unlock(); }
    3329  private:
    3330  std::shared_mutex m_Mutex;
    3331  };
    3332  #define VMA_RW_MUTEX VmaRWMutex
    3333  #elif defined(_WIN32)
    3334  // Use SRWLOCK from WinAPI.
    3335  class VmaRWMutex
    3336  {
    3337  public:
    3338  VmaRWMutex() { InitializeSRWLock(&m_Lock); }
    3339  void LockRead() { AcquireSRWLockShared(&m_Lock); }
    3340  void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
    3341  void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
    3342  void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
    3343  private:
    3344  SRWLOCK m_Lock;
    3345  };
    3346  #define VMA_RW_MUTEX VmaRWMutex
    3347  #else
    3348  // Less efficient fallback: Use normal mutex.
    3349  class VmaRWMutex
    3350  {
    3351  public:
    3352  void LockRead() { m_Mutex.Lock(); }
    3353  void UnlockRead() { m_Mutex.Unlock(); }
    3354  void LockWrite() { m_Mutex.Lock(); }
    3355  void UnlockWrite() { m_Mutex.Unlock(); }
    3356  private:
    3357  VMA_MUTEX m_Mutex;
    3358  };
    3359  #define VMA_RW_MUTEX VmaRWMutex
    3360  #endif // #if VMA_USE_STL_SHARED_MUTEX
    3361 #endif // #ifndef VMA_RW_MUTEX
    3362 
    3363 /*
    3364 If providing your own implementation, you need to implement a subset of std::atomic:
    3365 
    3366 - Constructor(uint32_t desired)
    3367 - uint32_t load() const
    3368 - void store(uint32_t desired)
    3369 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
    3370 */
    3371 #ifndef VMA_ATOMIC_UINT32
    3372  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
    3373 #endif
    3374 
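// Illustrative sketch (not part of the original header), assuming Win32: a custom
// replacement implementing the subset listed above using the Interlocked* API.
//
//   class MyAtomicUint32
//   {
//   public:
//       MyAtomicUint32(uint32_t desired) : m_Value((LONG)desired) { }
//       uint32_t load() const { return (uint32_t)InterlockedCompareExchange(&m_Value, 0, 0); }
//       void store(uint32_t desired) { InterlockedExchange(&m_Value, (LONG)desired); }
//       bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
//       {
//           const LONG prev = InterlockedCompareExchange(&m_Value, (LONG)desired, (LONG)expected);
//           if((uint32_t)prev == expected) { return true; }
//           expected = (uint32_t)prev;
//           return false;
//       }
//   private:
//       mutable volatile LONG m_Value;
//   };
//   #define VMA_ATOMIC_UINT32 MyAtomicUint32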
    3375 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    3376 
    3380  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
    3381 #endif
    3382 
    3383 #ifndef VMA_DEBUG_ALIGNMENT
    3384 
    3388  #define VMA_DEBUG_ALIGNMENT (1)
    3389 #endif
    3390 
    3391 #ifndef VMA_DEBUG_MARGIN
    3392 
    3396  #define VMA_DEBUG_MARGIN (0)
    3397 #endif
    3398 
    3399 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
    3400 
    3404  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
    3405 #endif
    3406 
    3407 #ifndef VMA_DEBUG_DETECT_CORRUPTION
    3408 
    3413  #define VMA_DEBUG_DETECT_CORRUPTION (0)
    3414 #endif
    3415 
    3416 #ifndef VMA_DEBUG_GLOBAL_MUTEX
    3417 
    3421  #define VMA_DEBUG_GLOBAL_MUTEX (0)
    3422 #endif
    3423 
    3424 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    3425 
    3429  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
    3430 #endif
    3431 
    3432 #ifndef VMA_SMALL_HEAP_MAX_SIZE
    3433  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
    3435 #endif
    3436 
    3437 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    3438  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
    3440 #endif
    3441 
    3442 #ifndef VMA_CLASS_NO_COPY
    3443  #define VMA_CLASS_NO_COPY(className) \
    3444  private: \
    3445  className(const className&) = delete; \
    3446  className& operator=(const className&) = delete;
    3447 #endif
    3448 
    3449 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
    3450 
    3451 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
    3452 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
    3453 
    3454 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
    3455 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
    3456 
    3457 /*******************************************************************************
    3458 END OF CONFIGURATION
    3459 */
    3460 
    3461 static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;
    3462 
    3463 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    3464  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
    3465 
    3466 // Returns number of bits set to 1 in (v).
    3467 static inline uint32_t VmaCountBitsSet(uint32_t v)
    3468 {
    3469  uint32_t c = v - ((v >> 1) & 0x55555555);
    3470  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    3471  c = ((c >> 4) + c) & 0x0F0F0F0F;
    3472  c = ((c >> 8) + c) & 0x00FF00FF;
    3473  c = ((c >> 16) + c) & 0x0000FFFF;
    3474  return c;
    3475 }
    3476 
    3477 // Aligns given value up to the nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
    3478 // Use types like uint32_t, uint64_t as T.
    3479 template <typename T>
    3480 static inline T VmaAlignUp(T val, T align)
    3481 {
    3482  return (val + align - 1) / align * align;
    3483 }
    3484 // Aligns given value down to the nearest multiple of align value. For example: VmaAlignDown(11, 8) = 8.
    3485 // Use types like uint32_t, uint64_t as T.
    3486 template <typename T>
    3487 static inline T VmaAlignDown(T val, T align)
    3488 {
    3489  return val / align * align;
    3490 }
    3491 
    3492 // Division with mathematical rounding to the nearest integer.
    3493 template <typename T>
    3494 static inline T VmaRoundDiv(T x, T y)
    3495 {
    3496  return (x + (y / (T)2)) / y;
    3497 }
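// For example, VmaRoundDiv(7, 2) = (7 + 1) / 2 = 4, where plain integer division would yield 3.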
    3498 
    3499 /*
    3500 Returns true if given number is a power of two.
    3501 T must be an unsigned integer, or a signed integer whose value is always nonnegative.
    3502 Note that for 0 it returns true.
    3503 */
    3504 template <typename T>
    3505 inline bool VmaIsPow2(T x)
    3506 {
    3507  return (x & (x-1)) == 0;
    3508 }
    3509 
    3510 // Returns smallest power of 2 greater or equal to v.
    3511 static inline uint32_t VmaNextPow2(uint32_t v)
    3512 {
    3513  v--;
    3514  v |= v >> 1;
    3515  v |= v >> 2;
    3516  v |= v >> 4;
    3517  v |= v >> 8;
    3518  v |= v >> 16;
    3519  v++;
    3520  return v;
    3521 }
    3522 static inline uint64_t VmaNextPow2(uint64_t v)
    3523 {
    3524  v--;
    3525  v |= v >> 1;
    3526  v |= v >> 2;
    3527  v |= v >> 4;
    3528  v |= v >> 8;
    3529  v |= v >> 16;
    3530  v |= v >> 32;
    3531  v++;
    3532  return v;
    3533 }
    3534 
    3535 // Returns largest power of 2 less or equal to v.
    3536 static inline uint32_t VmaPrevPow2(uint32_t v)
    3537 {
    3538  v |= v >> 1;
    3539  v |= v >> 2;
    3540  v |= v >> 4;
    3541  v |= v >> 8;
    3542  v |= v >> 16;
    3543  v = v ^ (v >> 1);
    3544  return v;
    3545 }
    3546 static inline uint64_t VmaPrevPow2(uint64_t v)
    3547 {
    3548  v |= v >> 1;
    3549  v |= v >> 2;
    3550  v |= v >> 4;
    3551  v |= v >> 8;
    3552  v |= v >> 16;
    3553  v |= v >> 32;
    3554  v = v ^ (v >> 1);
    3555  return v;
    3556 }
    3557 
    3558 static inline bool VmaStrIsEmpty(const char* pStr)
    3559 {
    3560  return pStr == VMA_NULL || *pStr == '\0';
    3561 }
    3562 
    3563 static const char* VmaAlgorithmToStr(uint32_t algorithm)
    3564 {
    3565  switch(algorithm)
    3566  {
    3567  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
    3568  return "Linear";
    3569  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
    3570  return "Buddy";
    3571  case 0:
    3572  return "Default";
    3573  default:
    3574  VMA_ASSERT(0);
    3575  return "";
    3576  }
    3577 }
    3578 
    3579 #ifndef VMA_SORT
    3580 
    3581 template<typename Iterator, typename Compare>
    3582 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
    3583 {
    3584  Iterator centerValue = end; --centerValue;
    3585  Iterator insertIndex = beg;
    3586  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    3587  {
    3588  if(cmp(*memTypeIndex, *centerValue))
    3589  {
    3590  if(insertIndex != memTypeIndex)
    3591  {
    3592  VMA_SWAP(*memTypeIndex, *insertIndex);
    3593  }
    3594  ++insertIndex;
    3595  }
    3596  }
    3597  if(insertIndex != centerValue)
    3598  {
    3599  VMA_SWAP(*insertIndex, *centerValue);
    3600  }
    3601  return insertIndex;
    3602 }
    3603 
    3604 template<typename Iterator, typename Compare>
    3605 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
    3606 {
    3607  if(beg < end)
    3608  {
    3609  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
    3610  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
    3611  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    3612  }
    3613 }
    3614 
    3615 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
    3616 
    3617 #endif // #ifndef VMA_SORT
    3618 
    3619 /*
    3620 Returns true if two memory blocks occupy overlapping pages.
    3621 ResourceA must be at a lower memory offset than ResourceB.
    3622 
    3623 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
    3624 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
    3625 */
    3626 static inline bool VmaBlocksOnSamePage(
    3627  VkDeviceSize resourceAOffset,
    3628  VkDeviceSize resourceASize,
    3629  VkDeviceSize resourceBOffset,
    3630  VkDeviceSize pageSize)
    3631 {
    3632  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    3633  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    3634  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    3635  VkDeviceSize resourceBStart = resourceBOffset;
    3636  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    3637  return resourceAEndPage == resourceBStartPage;
    3638 }
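// Worked example (illustrative): with pageSize = 0x10000 (a typical bufferImageGranularity),
// resourceAOffset = 0, resourceASize = 0x100 and resourceBOffset = 0x200:
// resourceAEndPage = 0xFF & ~0xFFFF = 0 and resourceBStartPage = 0x200 & ~0xFFFF = 0,
// so the function returns true; the two resources share a page and must respect the granularity.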
    3639 
    3640 enum VmaSuballocationType
    3641 {
    3642  VMA_SUBALLOCATION_TYPE_FREE = 0,
    3643  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    3644  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    3645  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    3646  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    3647  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    3648  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
    3649 };
    3650 
    3651 /*
    3652 Returns true if given suballocation types could conflict and must respect
    3653 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is a buffer
    3654 or linear image and the other is an optimal image. If the type is unknown, behave
    3655 conservatively.
    3656 */
    3657 static inline bool VmaIsBufferImageGranularityConflict(
    3658  VmaSuballocationType suballocType1,
    3659  VmaSuballocationType suballocType2)
    3660 {
    3661  if(suballocType1 > suballocType2)
    3662  {
    3663  VMA_SWAP(suballocType1, suballocType2);
    3664  }
    3665 
    3666  switch(suballocType1)
    3667  {
    3668  case VMA_SUBALLOCATION_TYPE_FREE:
    3669  return false;
    3670  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
    3671  return true;
    3672  case VMA_SUBALLOCATION_TYPE_BUFFER:
    3673  return
    3674  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    3675  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3676  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
    3677  return
    3678  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    3679  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
    3680  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3681  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
    3682  return
    3683  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3684  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
    3685  return false;
    3686  default:
    3687  VMA_ASSERT(0);
    3688  return true;
    3689  }
    3690 }
    3691 
    3692 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
    3693 {
    3694  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
    3695  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    3696  for(size_t i = 0; i < numberCount; ++i, ++pDst)
    3697  {
    3698  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
    3699  }
    3700 }
    3701 
    3702 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
    3703 {
    3704  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
    3705  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    3706  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
    3707  {
    3708  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
    3709  {
    3710  return false;
    3711  }
    3712  }
    3713  return true;
    3714 }
    3715 
    3716 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
    3717 struct VmaMutexLock
    3718 {
    3719  VMA_CLASS_NO_COPY(VmaMutexLock)
    3720 public:
    3721  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
    3722  m_pMutex(useMutex ? &mutex : VMA_NULL)
    3723  { if(m_pMutex) { m_pMutex->Lock(); } }
    3724  ~VmaMutexLock()
    3725  { if(m_pMutex) { m_pMutex->Unlock(); } }
    3726 private:
    3727  VMA_MUTEX* m_pMutex;
    3728 };
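// Typical usage (sketch): VmaMutexLock lock(m_Mutex, m_UseMutex); then the mutex,
// if used at all, stays locked for the rest of the enclosing scope.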
    3729 
    3730 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
    3731 struct VmaMutexLockRead
    3732 {
    3733  VMA_CLASS_NO_COPY(VmaMutexLockRead)
    3734 public:
    3735  VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
    3736  m_pMutex(useMutex ? &mutex : VMA_NULL)
    3737  { if(m_pMutex) { m_pMutex->LockRead(); } }
    3738  ~VmaMutexLockRead() { if(m_pMutex) { m_pMutex->UnlockRead(); } }
    3739 private:
    3740  VMA_RW_MUTEX* m_pMutex;
    3741 };
    3742 
    3743 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
    3744 struct VmaMutexLockWrite
    3745 {
    3746  VMA_CLASS_NO_COPY(VmaMutexLockWrite)
    3747 public:
    3748  VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) :
    3749  m_pMutex(useMutex ? &mutex : VMA_NULL)
    3750  { if(m_pMutex) { m_pMutex->LockWrite(); } }
    3751  ~VmaMutexLockWrite() { if(m_pMutex) { m_pMutex->UnlockWrite(); } }
    3752 private:
    3753  VMA_RW_MUTEX* m_pMutex;
    3754 };
    3755 
    3756 #if VMA_DEBUG_GLOBAL_MUTEX
    3757  static VMA_MUTEX gDebugGlobalMutex;
    3758  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
    3759 #else
    3760  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
    3761 #endif
    3762 
    3763 // Minimum size of a free suballocation to register it in the free suballocation collection.
    3764 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
    3765 
    3766 /*
    3767 Performs binary search and returns iterator to the first element that is greater
    3768 than or equal to (key), according to comparison (cmp).
    3769 
    3770 Cmp should return true if its first argument is less than its second argument.
    3771 
    3772 Returned value is the found element, if present in the collection, or the place
    3773 where a new element with value (key) should be inserted.
    3774 */
    3775 template <typename CmpLess, typename IterT, typename KeyT>
    3776 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
    3777 {
    3778  size_t down = 0, up = (end - beg);
    3779  while(down < up)
    3780  {
    3781  const size_t mid = (down + up) / 2;
    3782  if(cmp(*(beg+mid), key))
    3783  {
    3784  down = mid + 1;
    3785  }
    3786  else
    3787  {
    3788  up = mid;
    3789  }
    3790  }
    3791  return beg + down;
    3792 }
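// Illustrative usage (not part of the original header): find where 8 belongs in a
// sorted array. The lambda plays the role of CmpLess.
//
//   const uint32_t arr[] = { 1, 4, 7, 9 };
//   const uint32_t* it = VmaBinaryFindFirstNotLess(
//       arr, arr + 4, 8u, [](uint32_t a, uint32_t b) { return a < b; });
//   // it == arr + 3: 9 is the first element not less than 8, so a new element
//   // with value 8 would be inserted at index 3.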
    3793 
    3794 /*
    3795 Returns true if all pointers in the array are non-null and unique.
    3796 Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
    3797 T must be pointer type, e.g. VmaAllocation, VmaPool.
    3798 */
    3799 template<typename T>
    3800 static bool VmaValidatePointerArray(uint32_t count, const T* arr)
    3801 {
    3802  for(uint32_t i = 0; i < count; ++i)
    3803  {
    3804  const T iPtr = arr[i];
    3805  if(iPtr == VMA_NULL)
    3806  {
    3807  return false;
    3808  }
    3809  for(uint32_t j = i + 1; j < count; ++j)
    3810  {
    3811  if(iPtr == arr[j])
    3812  {
    3813  return false;
    3814  }
    3815  }
    3816  }
    3817  return true;
    3818 }
    3819 
    3821 // Memory allocation
    3822 
    3823 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
    3824 {
    3825  if((pAllocationCallbacks != VMA_NULL) &&
    3826  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    3827  {
    3828  return (*pAllocationCallbacks->pfnAllocation)(
    3829  pAllocationCallbacks->pUserData,
    3830  size,
    3831  alignment,
    3832  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    3833  }
    3834  else
    3835  {
    3836  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    3837  }
    3838 }
    3839 
    3840 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
    3841 {
    3842  if((pAllocationCallbacks != VMA_NULL) &&
    3843  (pAllocationCallbacks->pfnFree != VMA_NULL))
    3844  {
    3845  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    3846  }
    3847  else
    3848  {
    3849  VMA_SYSTEM_FREE(ptr);
    3850  }
    3851 }
    3852 
    3853 template<typename T>
    3854 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
    3855 {
    3856  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
    3857 }
    3858 
    3859 template<typename T>
    3860 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
    3861 {
    3862  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
    3863 }
    3864 
    3865 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
    3866 
    3867 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
    3868 
    3869 template<typename T>
    3870 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
    3871 {
    3872  ptr->~T();
    3873  VmaFree(pAllocationCallbacks, ptr);
    3874 }
    3875 
    3876 template<typename T>
    3877 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
    3878 {
    3879  if(ptr != VMA_NULL)
    3880  {
    3881  for(size_t i = count; i--; )
    3882  {
    3883  ptr[i].~T();
    3884  }
    3885  VmaFree(pAllocationCallbacks, ptr);
    3886  }
    3887 }
    3888 
    3889 // STL-compatible allocator.
    3890 template<typename T>
    3891 class VmaStlAllocator
    3892 {
    3893 public:
    3894  const VkAllocationCallbacks* const m_pCallbacks;
    3895  typedef T value_type;
    3896 
    3897  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    3898  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
    3899 
    3900  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    3901  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
    3902 
    3903  template<typename U>
    3904  bool operator==(const VmaStlAllocator<U>& rhs) const
    3905  {
    3906  return m_pCallbacks == rhs.m_pCallbacks;
    3907  }
    3908  template<typename U>
    3909  bool operator!=(const VmaStlAllocator<U>& rhs) const
    3910  {
    3911  return m_pCallbacks != rhs.m_pCallbacks;
    3912  }
    3913 
    3914  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
    3915 };
    3916 
    3917 #if VMA_USE_STL_VECTOR
    3918 
    3919 #define VmaVector std::vector
    3920 
    3921 template<typename T, typename allocatorT>
    3922 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
    3923 {
    3924  vec.insert(vec.begin() + index, item);
    3925 }
    3926 
    3927 template<typename T, typename allocatorT>
    3928 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
    3929 {
    3930  vec.erase(vec.begin() + index);
    3931 }
    3932 
    3933 #else // #if VMA_USE_STL_VECTOR
    3934 
    3935 /* Class with interface compatible with subset of std::vector.
    3936 T must be POD because constructors and destructors are not called and memcpy is
    3937 used for these objects. */
    3938 template<typename T, typename AllocatorT>
    3939 class VmaVector
    3940 {
    3941 public:
    3942  typedef T value_type;
    3943 
    3944  VmaVector(const AllocatorT& allocator) :
    3945  m_Allocator(allocator),
    3946  m_pArray(VMA_NULL),
    3947  m_Count(0),
    3948  m_Capacity(0)
    3949  {
    3950  }
    3951 
    3952  VmaVector(size_t count, const AllocatorT& allocator) :
    3953  m_Allocator(allocator),
    3954  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
    3955  m_Count(count),
    3956  m_Capacity(count)
    3957  {
    3958  }
    3959 
    3960  VmaVector(const VmaVector<T, AllocatorT>& src) :
    3961  m_Allocator(src.m_Allocator),
    3962  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
    3963  m_Count(src.m_Count),
    3964  m_Capacity(src.m_Count)
    3965  {
    3966  if(m_Count != 0)
    3967  {
    3968  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
    3969  }
    3970  }
    3971 
    3972  ~VmaVector()
    3973  {
    3974  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3975  }
    3976 
    3977  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    3978  {
    3979  if(&rhs != this)
    3980  {
    3981  resize(rhs.m_Count);
    3982  if(m_Count != 0)
    3983  {
    3984  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
    3985  }
    3986  }
    3987  return *this;
    3988  }
    3989 
    3990  bool empty() const { return m_Count == 0; }
    3991  size_t size() const { return m_Count; }
    3992  T* data() { return m_pArray; }
    3993  const T* data() const { return m_pArray; }
    3994 
    3995  T& operator[](size_t index)
    3996  {
    3997  VMA_HEAVY_ASSERT(index < m_Count);
    3998  return m_pArray[index];
    3999  }
    4000  const T& operator[](size_t index) const
    4001  {
    4002  VMA_HEAVY_ASSERT(index < m_Count);
    4003  return m_pArray[index];
    4004  }
    4005 
    4006  T& front()
    4007  {
    4008  VMA_HEAVY_ASSERT(m_Count > 0);
    4009  return m_pArray[0];
    4010  }
    4011  const T& front() const
    4012  {
    4013  VMA_HEAVY_ASSERT(m_Count > 0);
    4014  return m_pArray[0];
    4015  }
    4016  T& back()
    4017  {
    4018  VMA_HEAVY_ASSERT(m_Count > 0);
    4019  return m_pArray[m_Count - 1];
    4020  }
    4021  const T& back() const
    4022  {
    4023  VMA_HEAVY_ASSERT(m_Count > 0);
    4024  return m_pArray[m_Count - 1];
    4025  }
    4026 
    4027  void reserve(size_t newCapacity, bool freeMemory = false)
    4028  {
    4029  newCapacity = VMA_MAX(newCapacity, m_Count);
    4030 
    4031  if((newCapacity < m_Capacity) && !freeMemory)
    4032  {
    4033  newCapacity = m_Capacity;
    4034  }
    4035 
    4036  if(newCapacity != m_Capacity)
    4037  {
    4038  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
    4039  if(m_Count != 0)
    4040  {
    4041  memcpy(newArray, m_pArray, m_Count * sizeof(T));
    4042  }
    4043  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    4044  m_Capacity = newCapacity;
    4045  m_pArray = newArray;
    4046  }
    4047  }
    4048 
    4049  void resize(size_t newCount, bool freeMemory = false)
    4050  {
    4051  size_t newCapacity = m_Capacity;
    4052  if(newCount > m_Capacity)
    4053  {
    4054  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
    4055  }
    4056  else if(freeMemory)
    4057  {
    4058  newCapacity = newCount;
    4059  }
    4060 
    4061  if(newCapacity != m_Capacity)
    4062  {
    4063  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
    4064  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
    4065  if(elementsToCopy != 0)
    4066  {
    4067  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
    4068  }
    4069  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    4070  m_Capacity = newCapacity;
    4071  m_pArray = newArray;
    4072  }
    4073 
    4074  m_Count = newCount;
    4075  }
    4076 
    4077  void clear(bool freeMemory = false)
    4078  {
    4079  resize(0, freeMemory);
    4080  }
    4081 
    4082  void insert(size_t index, const T& src)
    4083  {
    4084  VMA_HEAVY_ASSERT(index <= m_Count);
    4085  const size_t oldCount = size();
    4086  resize(oldCount + 1);
    4087  if(index < oldCount)
    4088  {
    4089  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
    4090  }
    4091  m_pArray[index] = src;
    4092  }
    4093 
    4094  void remove(size_t index)
    4095  {
    4096  VMA_HEAVY_ASSERT(index < m_Count);
    4097  const size_t oldCount = size();
    4098  if(index < oldCount - 1)
    4099  {
    4100  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
    4101  }
    4102  resize(oldCount - 1);
    4103  }
    4104 
    4105  void push_back(const T& src)
    4106  {
    4107  const size_t newIndex = size();
    4108  resize(newIndex + 1);
    4109  m_pArray[newIndex] = src;
    4110  }
    4111 
    4112  void pop_back()
    4113  {
    4114  VMA_HEAVY_ASSERT(m_Count > 0);
    4115  resize(size() - 1);
    4116  }
    4117 
    4118  void push_front(const T& src)
    4119  {
    4120  insert(0, src);
    4121  }
    4122 
    4123  void pop_front()
    4124  {
    4125  VMA_HEAVY_ASSERT(m_Count > 0);
    4126  remove(0);
    4127  }
    4128 
    4129  typedef T* iterator;
    4130 
    4131  iterator begin() { return m_pArray; }
    4132  iterator end() { return m_pArray + m_Count; }
    4133 
    4134 private:
    4135  AllocatorT m_Allocator;
    4136  T* m_pArray;
    4137  size_t m_Count;
    4138  size_t m_Capacity;
    4139 };
    4140 
    4141 template<typename T, typename allocatorT>
    4142 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
    4143 {
    4144  vec.insert(index, item);
    4145 }
    4146 
    4147 template<typename T, typename allocatorT>
    4148 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
    4149 {
    4150  vec.remove(index);
    4151 }
    4152 
    4153 #endif // #if VMA_USE_STL_VECTOR
    4154 
    4155 template<typename CmpLess, typename VectorT>
    4156 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
    4157 {
    4158  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    4159  vector.data(),
    4160  vector.data() + vector.size(),
    4161  value,
    4162  CmpLess()) - vector.data();
    4163  VmaVectorInsert(vector, indexToInsert, value);
    4164  return indexToInsert;
    4165 }
    4166 
    4167 template<typename CmpLess, typename VectorT>
    4168 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
    4169 {
    4170  CmpLess comparator;
    4171  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
    4172  vector.begin(),
    4173  vector.end(),
    4174  value,
    4175  comparator);
    4176  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    4177  {
    4178  size_t indexToRemove = it - vector.begin();
    4179  VmaVectorRemove(vector, indexToRemove);
    4180  return true;
    4181  }
    4182  return false;
    4183 }
    4184 
    4185 template<typename CmpLess, typename IterT, typename KeyT>
    4186 IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
    4187 {
    4188  CmpLess comparator;
    4189  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
    4190  beg, end, value, comparator);
    4191  if(it == end ||
    4192  (!comparator(*it, value) && !comparator(value, *it)))
    4193  {
    4194  return it;
    4195  }
    4196  return end;
    4197 }
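// Editorial note: a minimal illustrative sketch (not part of the library) of the
// sorted-vector helpers above, assuming VmaVector's value_type/data()/begin()/end()
// members defined earlier in this file. Insertion keeps the vector sorted, which is
// what lets VmaVectorFindSorted use binary search.
#if 0
struct IntLess
{
    bool operator()(int lhs, int rhs) const { return lhs < rhs; }
};

static void SortedVectorSketch(VmaVector< int, VmaStlAllocator<int> >& v)
{
    VmaVectorInsertSorted<IntLess>(v, 5);                       // v contains: 5
    VmaVectorInsertSorted<IntLess>(v, 3);                       // v contains: 3 5
    const bool removed = VmaVectorRemoveSorted<IntLess>(v, 5);  // true; v: 3
    int* const found = VmaVectorFindSorted<IntLess>(v.begin(), v.end(), 3);
    (void)removed; (void)found;
}
#endif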
    4198 
    4199 ////////////////////////////////////////////////////////////////////////////////
    4200 // class VmaPoolAllocator
    4201 
    4202 /*
    4203 Allocator for objects of type T using a list of arrays (pools) to speed up
    4204 allocation. The number of elements that can be allocated is not bounded,
    4205 because the allocator can create multiple blocks.
    4206 */
    4207 template<typename T>
    4208 class VmaPoolAllocator
    4209 {
    4210  VMA_CLASS_NO_COPY(VmaPoolAllocator)
    4211 public:
    4212  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    4213  ~VmaPoolAllocator();
    4214  void Clear();
    4215  T* Alloc();
    4216  void Free(T* ptr);
    4217 
    4218 private:
    4219  union Item
    4220  {
    4221  uint32_t NextFreeIndex;
    4222  T Value;
    4223  };
    4224 
    4225  struct ItemBlock
    4226  {
    4227  Item* pItems;
    4228  uint32_t FirstFreeIndex;
    4229  };
    4230 
    4231  const VkAllocationCallbacks* m_pAllocationCallbacks;
    4232  size_t m_ItemsPerBlock;
    4233  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
    4234 
    4235  ItemBlock& CreateNewBlock();
    4236 };
    4237 
    4238 template<typename T>
    4239 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    4240  m_pAllocationCallbacks(pAllocationCallbacks),
    4241  m_ItemsPerBlock(itemsPerBlock),
    4242  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
    4243 {
    4244  VMA_ASSERT(itemsPerBlock > 0);
    4245 }
    4246 
    4247 template<typename T>
    4248 VmaPoolAllocator<T>::~VmaPoolAllocator()
    4249 {
    4250  Clear();
    4251 }
    4252 
    4253 template<typename T>
    4254 void VmaPoolAllocator<T>::Clear()
    4255 {
    4256  for(size_t i = m_ItemBlocks.size(); i--; )
    4257  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    4258  m_ItemBlocks.clear();
    4259 }
    4260 
    4261 template<typename T>
    4262 T* VmaPoolAllocator<T>::Alloc()
    4263 {
    4264  for(size_t i = m_ItemBlocks.size(); i--; )
    4265  {
    4266  ItemBlock& block = m_ItemBlocks[i];
    4267  // This block has some free items: use the first one.
    4268  if(block.FirstFreeIndex != UINT32_MAX)
    4269  {
    4270  Item* const pItem = &block.pItems[block.FirstFreeIndex];
    4271  block.FirstFreeIndex = pItem->NextFreeIndex;
    4272  return &pItem->Value;
    4273  }
    4274  }
    4275 
    4276  // No block has a free item: create a new one and use it.
    4277  ItemBlock& newBlock = CreateNewBlock();
    4278  Item* const pItem = &newBlock.pItems[0];
    4279  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    4280  return &pItem->Value;
    4281 }
    4282 
    4283 template<typename T>
    4284 void VmaPoolAllocator<T>::Free(T* ptr)
    4285 {
    4286  // Search all memory blocks to find ptr.
    4287  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    4288  {
    4289  ItemBlock& block = m_ItemBlocks[i];
    4290 
    4291  // Reinterpret T* as Item* by copying the pointer bytes (Value is a member of the union, so the addresses coincide).
    4292  Item* pItemPtr;
    4293  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
    4294 
    4295  // Check if pItemPtr is in address range of this block.
    4296  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
    4297  {
    4298  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
    4299  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
    4300  block.FirstFreeIndex = index;
    4301  return;
    4302  }
    4303  }
    4304  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
    4305 }
    4306 
    4307 template<typename T>
    4308 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
    4309 {
    4310  ItemBlock newBlock = {
    4311  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
    4312 
    4313  m_ItemBlocks.push_back(newBlock);
    4314 
    4315  // Setup singly-linked list of all free items in this block.
    4316  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
    4317  newBlock.pItems[i].NextFreeIndex = i + 1;
    4318  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
    4319  return m_ItemBlocks.back();
    4320 }
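// Editorial note: a minimal usage sketch (not part of the library). Alloc() pops
// the first free slot of a block in O(1) by following the intrusive free list
// (NextFreeIndex); Free() pushes the slot back as the block's new FirstFreeIndex.
#if 0
static void PoolAllocatorSketch(const VkAllocationCallbacks* pCallbacks)
{
    VmaPoolAllocator<uint64_t> pool(pCallbacks, 32); // 32 items per block.
    uint64_t* const a = pool.Alloc(); // Uses block.pItems[FirstFreeIndex].
    uint64_t* const b = pool.Alloc(); // FirstFreeIndex advanced via NextFreeIndex.
    pool.Free(b);                     // b's slot becomes the new FirstFreeIndex.
    pool.Free(a);
}
#endif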
    4321 
    4322 ////////////////////////////////////////////////////////////////////////////////
    4323 // class VmaRawList, VmaList
    4324 
    4325 #if VMA_USE_STL_LIST
    4326 
    4327 #define VmaList std::list
    4328 
    4329 #else // #if VMA_USE_STL_LIST
    4330 
    4331 template<typename T>
    4332 struct VmaListItem
    4333 {
    4334  VmaListItem* pPrev;
    4335  VmaListItem* pNext;
    4336  T Value;
    4337 };
    4338 
    4339 // Doubly linked list.
    4340 template<typename T>
    4341 class VmaRawList
    4342 {
    4343  VMA_CLASS_NO_COPY(VmaRawList)
    4344 public:
    4345  typedef VmaListItem<T> ItemType;
    4346 
    4347  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    4348  ~VmaRawList();
    4349  void Clear();
    4350 
    4351  size_t GetCount() const { return m_Count; }
    4352  bool IsEmpty() const { return m_Count == 0; }
    4353 
    4354  ItemType* Front() { return m_pFront; }
    4355  const ItemType* Front() const { return m_pFront; }
    4356  ItemType* Back() { return m_pBack; }
    4357  const ItemType* Back() const { return m_pBack; }
    4358 
    4359  ItemType* PushBack();
    4360  ItemType* PushFront();
    4361  ItemType* PushBack(const T& value);
    4362  ItemType* PushFront(const T& value);
    4363  void PopBack();
    4364  void PopFront();
    4365 
    4366  // pItem can be null, which means PushBack.
    4367  ItemType* InsertBefore(ItemType* pItem);
    4368  // pItem can be null, which means PushFront.
    4369  ItemType* InsertAfter(ItemType* pItem);
    4370 
    4371  ItemType* InsertBefore(ItemType* pItem, const T& value);
    4372  ItemType* InsertAfter(ItemType* pItem, const T& value);
    4373 
    4374  void Remove(ItemType* pItem);
    4375 
    4376 private:
    4377  const VkAllocationCallbacks* const m_pAllocationCallbacks;
    4378  VmaPoolAllocator<ItemType> m_ItemAllocator;
    4379  ItemType* m_pFront;
    4380  ItemType* m_pBack;
    4381  size_t m_Count;
    4382 };
    4383 
    4384 template<typename T>
    4385 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    4386  m_pAllocationCallbacks(pAllocationCallbacks),
    4387  m_ItemAllocator(pAllocationCallbacks, 128),
    4388  m_pFront(VMA_NULL),
    4389  m_pBack(VMA_NULL),
    4390  m_Count(0)
    4391 {
    4392 }
    4393 
    4394 template<typename T>
    4395 VmaRawList<T>::~VmaRawList()
    4396 {
    4397  // Intentionally not calling Clear, because that would waste computation
    4398  // on returning all items to m_ItemAllocator as free.
    4399 }
    4400 
    4401 template<typename T>
    4402 void VmaRawList<T>::Clear()
    4403 {
    4404  if(IsEmpty() == false)
    4405  {
    4406  ItemType* pItem = m_pBack;
    4407  while(pItem != VMA_NULL)
    4408  {
    4409  ItemType* const pPrevItem = pItem->pPrev;
    4410  m_ItemAllocator.Free(pItem);
    4411  pItem = pPrevItem;
    4412  }
    4413  m_pFront = VMA_NULL;
    4414  m_pBack = VMA_NULL;
    4415  m_Count = 0;
    4416  }
    4417 }
    4418 
    4419 template<typename T>
    4420 VmaListItem<T>* VmaRawList<T>::PushBack()
    4421 {
    4422  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    4423  pNewItem->pNext = VMA_NULL;
    4424  if(IsEmpty())
    4425  {
    4426  pNewItem->pPrev = VMA_NULL;
    4427  m_pFront = pNewItem;
    4428  m_pBack = pNewItem;
    4429  m_Count = 1;
    4430  }
    4431  else
    4432  {
    4433  pNewItem->pPrev = m_pBack;
    4434  m_pBack->pNext = pNewItem;
    4435  m_pBack = pNewItem;
    4436  ++m_Count;
    4437  }
    4438  return pNewItem;
    4439 }
    4440 
    4441 template<typename T>
    4442 VmaListItem<T>* VmaRawList<T>::PushFront()
    4443 {
    4444  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    4445  pNewItem->pPrev = VMA_NULL;
    4446  if(IsEmpty())
    4447  {
    4448  pNewItem->pNext = VMA_NULL;
    4449  m_pFront = pNewItem;
    4450  m_pBack = pNewItem;
    4451  m_Count = 1;
    4452  }
    4453  else
    4454  {
    4455  pNewItem->pNext = m_pFront;
    4456  m_pFront->pPrev = pNewItem;
    4457  m_pFront = pNewItem;
    4458  ++m_Count;
    4459  }
    4460  return pNewItem;
    4461 }
    4462 
    4463 template<typename T>
    4464 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
    4465 {
    4466  ItemType* const pNewItem = PushBack();
    4467  pNewItem->Value = value;
    4468  return pNewItem;
    4469 }
    4470 
    4471 template<typename T>
    4472 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
    4473 {
    4474  ItemType* const pNewItem = PushFront();
    4475  pNewItem->Value = value;
    4476  return pNewItem;
    4477 }
    4478 
    4479 template<typename T>
    4480 void VmaRawList<T>::PopBack()
    4481 {
    4482  VMA_HEAVY_ASSERT(m_Count > 0);
    4483  ItemType* const pBackItem = m_pBack;
    4484  ItemType* const pPrevItem = pBackItem->pPrev;
    4485  if(pPrevItem != VMA_NULL)
    4486  {
    4487  pPrevItem->pNext = VMA_NULL;
    4488  }
    4489  m_pBack = pPrevItem;
    4490  m_ItemAllocator.Free(pBackItem);
    4491  --m_Count;
    4492 }
    4493 
    4494 template<typename T>
    4495 void VmaRawList<T>::PopFront()
    4496 {
    4497  VMA_HEAVY_ASSERT(m_Count > 0);
    4498  ItemType* const pFrontItem = m_pFront;
    4499  ItemType* const pNextItem = pFrontItem->pNext;
    4500  if(pNextItem != VMA_NULL)
    4501  {
    4502  pNextItem->pPrev = VMA_NULL;
    4503  }
    4504  m_pFront = pNextItem;
    4505  m_ItemAllocator.Free(pFrontItem);
    4506  --m_Count;
    4507 }
    4508 
    4509 template<typename T>
    4510 void VmaRawList<T>::Remove(ItemType* pItem)
    4511 {
    4512  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    4513  VMA_HEAVY_ASSERT(m_Count > 0);
    4514 
    4515  if(pItem->pPrev != VMA_NULL)
    4516  {
    4517  pItem->pPrev->pNext = pItem->pNext;
    4518  }
    4519  else
    4520  {
    4521  VMA_HEAVY_ASSERT(m_pFront == pItem);
    4522  m_pFront = pItem->pNext;
    4523  }
    4524 
    4525  if(pItem->pNext != VMA_NULL)
    4526  {
    4527  pItem->pNext->pPrev = pItem->pPrev;
    4528  }
    4529  else
    4530  {
    4531  VMA_HEAVY_ASSERT(m_pBack == pItem);
    4532  m_pBack = pItem->pPrev;
    4533  }
    4534 
    4535  m_ItemAllocator.Free(pItem);
    4536  --m_Count;
    4537 }
    4538 
    4539 template<typename T>
    4540 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
    4541 {
    4542  if(pItem != VMA_NULL)
    4543  {
    4544  ItemType* const prevItem = pItem->pPrev;
    4545  ItemType* const newItem = m_ItemAllocator.Alloc();
    4546  newItem->pPrev = prevItem;
    4547  newItem->pNext = pItem;
    4548  pItem->pPrev = newItem;
    4549  if(prevItem != VMA_NULL)
    4550  {
    4551  prevItem->pNext = newItem;
    4552  }
    4553  else
    4554  {
    4555  VMA_HEAVY_ASSERT(m_pFront == pItem);
    4556  m_pFront = newItem;
    4557  }
    4558  ++m_Count;
    4559  return newItem;
    4560  }
    4561  else
    4562  return PushBack();
    4563 }
    4564 
    4565 template<typename T>
    4566 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
    4567 {
    4568  if(pItem != VMA_NULL)
    4569  {
    4570  ItemType* const nextItem = pItem->pNext;
    4571  ItemType* const newItem = m_ItemAllocator.Alloc();
    4572  newItem->pNext = nextItem;
    4573  newItem->pPrev = pItem;
    4574  pItem->pNext = newItem;
    4575  if(nextItem != VMA_NULL)
    4576  {
    4577  nextItem->pPrev = newItem;
    4578  }
    4579  else
    4580  {
    4581  VMA_HEAVY_ASSERT(m_pBack == pItem);
    4582  m_pBack = newItem;
    4583  }
    4584  ++m_Count;
    4585  return newItem;
    4586  }
    4587  else
    4588  return PushFront();
    4589 }
    4590 
    4591 template<typename T>
    4592 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
    4593 {
    4594  ItemType* const newItem = InsertBefore(pItem);
    4595  newItem->Value = value;
    4596  return newItem;
    4597 }
    4598 
    4599 template<typename T>
    4600 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
    4601 {
    4602  ItemType* const newItem = InsertAfter(pItem);
    4603  newItem->Value = value;
    4604  return newItem;
    4605 }
    4606 
    4607 template<typename T, typename AllocatorT>
    4608 class VmaList
    4609 {
    4610  VMA_CLASS_NO_COPY(VmaList)
    4611 public:
    4612  class iterator
    4613  {
    4614  public:
    4615  iterator() :
    4616  m_pList(VMA_NULL),
    4617  m_pItem(VMA_NULL)
    4618  {
    4619  }
    4620 
    4621  T& operator*() const
    4622  {
    4623  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    4624  return m_pItem->Value;
    4625  }
    4626  T* operator->() const
    4627  {
    4628  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    4629  return &m_pItem->Value;
    4630  }
    4631 
    4632  iterator& operator++()
    4633  {
    4634  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    4635  m_pItem = m_pItem->pNext;
    4636  return *this;
    4637  }
    4638  iterator& operator--()
    4639  {
    4640  if(m_pItem != VMA_NULL)
    4641  {
    4642  m_pItem = m_pItem->pPrev;
    4643  }
    4644  else
    4645  {
    4646  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
    4647  m_pItem = m_pList->Back();
    4648  }
    4649  return *this;
    4650  }
    4651 
    4652  iterator operator++(int)
    4653  {
    4654  iterator result = *this;
    4655  ++*this;
    4656  return result;
    4657  }
    4658  iterator operator--(int)
    4659  {
    4660  iterator result = *this;
    4661  --*this;
    4662  return result;
    4663  }
    4664 
    4665  bool operator==(const iterator& rhs) const
    4666  {
    4667  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    4668  return m_pItem == rhs.m_pItem;
    4669  }
    4670  bool operator!=(const iterator& rhs) const
    4671  {
    4672  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    4673  return m_pItem != rhs.m_pItem;
    4674  }
    4675 
    4676  private:
    4677  VmaRawList<T>* m_pList;
    4678  VmaListItem<T>* m_pItem;
    4679 
    4680  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
    4681  m_pList(pList),
    4682  m_pItem(pItem)
    4683  {
    4684  }
    4685 
    4686  friend class VmaList<T, AllocatorT>;
    4687  };
    4688 
    4689  class const_iterator
    4690  {
    4691  public:
    4692  const_iterator() :
    4693  m_pList(VMA_NULL),
    4694  m_pItem(VMA_NULL)
    4695  {
    4696  }
    4697 
    4698  const_iterator(const iterator& src) :
    4699  m_pList(src.m_pList),
    4700  m_pItem(src.m_pItem)
    4701  {
    4702  }
    4703 
    4704  const T& operator*() const
    4705  {
    4706  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    4707  return m_pItem->Value;
    4708  }
    4709  const T* operator->() const
    4710  {
    4711  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    4712  return &m_pItem->Value;
    4713  }
    4714 
    4715  const_iterator& operator++()
    4716  {
    4717  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    4718  m_pItem = m_pItem->pNext;
    4719  return *this;
    4720  }
    4721  const_iterator& operator--()
    4722  {
    4723  if(m_pItem != VMA_NULL)
    4724  {
    4725  m_pItem = m_pItem->pPrev;
    4726  }
    4727  else
    4728  {
    4729  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
    4730  m_pItem = m_pList->Back();
    4731  }
    4732  return *this;
    4733  }
    4734 
    4735  const_iterator operator++(int)
    4736  {
    4737  const_iterator result = *this;
    4738  ++*this;
    4739  return result;
    4740  }
    4741  const_iterator operator--(int)
    4742  {
    4743  const_iterator result = *this;
    4744  --*this;
    4745  return result;
    4746  }
    4747 
    4748  bool operator==(const const_iterator& rhs) const
    4749  {
    4750  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    4751  return m_pItem == rhs.m_pItem;
    4752  }
    4753  bool operator!=(const const_iterator& rhs) const
    4754  {
    4755  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    4756  return m_pItem != rhs.m_pItem;
    4757  }
    4758 
    4759  private:
    4760  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
    4761  m_pList(pList),
    4762  m_pItem(pItem)
    4763  {
    4764  }
    4765 
    4766  const VmaRawList<T>* m_pList;
    4767  const VmaListItem<T>* m_pItem;
    4768 
    4769  friend class VmaList<T, AllocatorT>;
    4770  };
    4771 
    4772  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
    4773 
    4774  bool empty() const { return m_RawList.IsEmpty(); }
    4775  size_t size() const { return m_RawList.GetCount(); }
    4776 
    4777  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    4778  iterator end() { return iterator(&m_RawList, VMA_NULL); }
    4779 
    4780  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    4781  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
    4782 
    4783  void clear() { m_RawList.Clear(); }
    4784  void push_back(const T& value) { m_RawList.PushBack(value); }
    4785  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    4786  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
    4787 
    4788 private:
    4789  VmaRawList<T> m_RawList;
    4790 };
    4791 
    4792 #endif // #if VMA_USE_STL_LIST
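// Editorial note: a minimal illustrative sketch (not part of the library) of the
// VmaList interface above. Iteration mirrors std::list, but nodes come from
// VmaPoolAllocator, so push_back/erase recycle pooled items instead of hitting
// the system heap on every operation.
#if 0
static void ListSketch(const VmaStlAllocator<int>& alloc)
{
    VmaList< int, VmaStlAllocator<int> > list(alloc);
    list.push_back(1);
    list.push_back(2);
    for(VmaList< int, VmaStlAllocator<int> >::iterator it = list.begin();
        it != list.end();
        ++it)
    {
        // *it visits 1, then 2.
    }
    list.erase(list.begin()); // Unlinks the node and returns it to the item pool.
}
#endif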
    4793 
    4794 ////////////////////////////////////////////////////////////////////////////////
    4795 // class VmaMap
    4796 
    4797 // Unused in this version.
    4798 #if 0
    4799 
    4800 #if VMA_USE_STL_UNORDERED_MAP
    4801 
    4802 #define VmaPair std::pair
    4803 
    4804 #define VMA_MAP_TYPE(KeyT, ValueT) \
    4805  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
    4806 
    4807 #else // #if VMA_USE_STL_UNORDERED_MAP
    4808 
    4809 template<typename T1, typename T2>
    4810 struct VmaPair
    4811 {
    4812  T1 first;
    4813  T2 second;
    4814 
    4815  VmaPair() : first(), second() { }
    4816  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
    4817 };
    4818 
    4819 /* Class compatible with a subset of the std::unordered_map interface.
    4820 KeyT and ValueT must be POD because they will be stored in a VmaVector.
    4821 */
    4822 template<typename KeyT, typename ValueT>
    4823 class VmaMap
    4824 {
    4825 public:
    4826  typedef VmaPair<KeyT, ValueT> PairType;
    4827  typedef PairType* iterator;
    4828 
    4829  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
    4830 
    4831  iterator begin() { return m_Vector.begin(); }
    4832  iterator end() { return m_Vector.end(); }
    4833 
    4834  void insert(const PairType& pair);
    4835  iterator find(const KeyT& key);
    4836  void erase(iterator it);
    4837 
    4838 private:
    4839  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
    4840 };
    4841 
    4842 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
    4843 
    4844 template<typename FirstT, typename SecondT>
    4845 struct VmaPairFirstLess
    4846 {
    4847  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    4848  {
    4849  return lhs.first < rhs.first;
    4850  }
    4851  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    4852  {
    4853  return lhs.first < rhsFirst;
    4854  }
    4855 };
    4856 
    4857 template<typename KeyT, typename ValueT>
    4858 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
    4859 {
    4860  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    4861  m_Vector.data(),
    4862  m_Vector.data() + m_Vector.size(),
    4863  pair,
    4864  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    4865  VmaVectorInsert(m_Vector, indexToInsert, pair);
    4866 }
    4867 
    4868 template<typename KeyT, typename ValueT>
    4869 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
    4870 {
    4871  PairType* it = VmaBinaryFindFirstNotLess(
    4872  m_Vector.data(),
    4873  m_Vector.data() + m_Vector.size(),
    4874  key,
    4875  VmaPairFirstLess<KeyT, ValueT>());
    4876  if((it != m_Vector.end()) && (it->first == key))
    4877  {
    4878  return it;
    4879  }
    4880  else
    4881  {
    4882  return m_Vector.end();
    4883  }
    4884 }
    4885 
    4886 template<typename KeyT, typename ValueT>
    4887 void VmaMap<KeyT, ValueT>::erase(iterator it)
    4888 {
    4889  VmaVectorRemove(m_Vector, it - m_Vector.begin());
    4890 }
    4891 
    4892 #endif // #if VMA_USE_STL_UNORDERED_MAP
    4893 
    4894 #endif // #if 0
    4895 
    4896 ////////////////////////////////////////////////////////////////////////////////
    4897 
    4898 class VmaDeviceMemoryBlock;
    4899 
    4900 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
    4901 
    4902 struct VmaAllocation_T
    4903 {
    4904  VMA_CLASS_NO_COPY(VmaAllocation_T)
    4905 private:
    4906  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
    4907 
    4908  enum FLAGS
    4909  {
    4910  FLAG_USER_DATA_STRING = 0x01,
    4911  };
    4912 
    4913 public:
    4914  enum ALLOCATION_TYPE
    4915  {
    4916  ALLOCATION_TYPE_NONE,
    4917  ALLOCATION_TYPE_BLOCK,
    4918  ALLOCATION_TYPE_DEDICATED,
    4919  };
    4920 
    4921  VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
    4922  m_Alignment(1),
    4923  m_Size(0),
    4924  m_pUserData(VMA_NULL),
    4925  m_LastUseFrameIndex(currentFrameIndex),
    4926  m_Type((uint8_t)ALLOCATION_TYPE_NONE),
    4927  m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
    4928  m_MapCount(0),
    4929  m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
    4930  {
    4931 #if VMA_STATS_STRING_ENABLED
    4932  m_CreationFrameIndex = currentFrameIndex;
    4933  m_BufferImageUsage = 0;
    4934 #endif
    4935  }
    4936 
    4937  ~VmaAllocation_T()
    4938  {
    4939  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
    4940 
    4941  // Check if owned string was freed.
    4942  VMA_ASSERT(m_pUserData == VMA_NULL);
    4943  }
    4944 
    4945  void InitBlockAllocation(
    4946  VmaPool hPool,
    4947  VmaDeviceMemoryBlock* block,
    4948  VkDeviceSize offset,
    4949  VkDeviceSize alignment,
    4950  VkDeviceSize size,
    4951  VmaSuballocationType suballocationType,
    4952  bool mapped,
    4953  bool canBecomeLost)
    4954  {
    4955  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
    4956  VMA_ASSERT(block != VMA_NULL);
    4957  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
    4958  m_Alignment = alignment;
    4959  m_Size = size;
    4960  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
    4961  m_SuballocationType = (uint8_t)suballocationType;
    4962  m_BlockAllocation.m_hPool = hPool;
    4963  m_BlockAllocation.m_Block = block;
    4964  m_BlockAllocation.m_Offset = offset;
    4965  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    4966  }
    4967 
    4968  void InitLost()
    4969  {
    4970  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
    4971  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
    4972  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
    4973  m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
    4974  m_BlockAllocation.m_Block = VMA_NULL;
    4975  m_BlockAllocation.m_Offset = 0;
    4976  m_BlockAllocation.m_CanBecomeLost = true;
    4977  }
    4978 
    4979  void ChangeBlockAllocation(
    4980  VmaAllocator hAllocator,
    4981  VmaDeviceMemoryBlock* block,
    4982  VkDeviceSize offset);
    4983 
    4984  void ChangeSize(VkDeviceSize newSize);
    4985  void ChangeOffset(VkDeviceSize newOffset);
    4986 
    4987  // A non-null pMappedData means the allocation was created with the MAPPED flag.
    4988  void InitDedicatedAllocation(
    4989  uint32_t memoryTypeIndex,
    4990  VkDeviceMemory hMemory,
    4991  VmaSuballocationType suballocationType,
    4992  void* pMappedData,
    4993  VkDeviceSize size)
    4994  {
    4995  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
    4996  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
    4997  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
    4998  m_Alignment = 0;
    4999  m_Size = size;
    5000  m_SuballocationType = (uint8_t)suballocationType;
    5001  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
    5002  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
    5003  m_DedicatedAllocation.m_hMemory = hMemory;
    5004  m_DedicatedAllocation.m_pMappedData = pMappedData;
    5005  }
    5006 
    5007  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    5008  VkDeviceSize GetAlignment() const { return m_Alignment; }
    5009  VkDeviceSize GetSize() const { return m_Size; }
    5010  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
    5011  void* GetUserData() const { return m_pUserData; }
    5012  void SetUserData(VmaAllocator hAllocator, void* pUserData);
    5013  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
    5014 
    5015  VmaDeviceMemoryBlock* GetBlock() const
    5016  {
    5017  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    5018  return m_BlockAllocation.m_Block;
    5019  }
    5020  VkDeviceSize GetOffset() const;
    5021  VkDeviceMemory GetMemory() const;
    5022  uint32_t GetMemoryTypeIndex() const;
    5023  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
    5024  void* GetMappedData() const;
    5025  bool CanBecomeLost() const;
    5026  VmaPool GetPool() const;
    5027 
    5028  uint32_t GetLastUseFrameIndex() const
    5029  {
    5030  return m_LastUseFrameIndex.load();
    5031  }
    5032  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    5033  {
    5034  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    5035  }
    5036  /*
    5037  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
    5038  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    5039  - Else, returns false.
    5040 
    5041  If hAllocation is already lost, asserts - this function must not be called then.
    5042  If hAllocation was not created with CAN_BECOME_LOST_BIT, asserts.
    5043  */
    5044  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
    5045 
    5046  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    5047  {
    5048  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
    5049  outInfo.blockCount = 1;
    5050  outInfo.allocationCount = 1;
    5051  outInfo.unusedRangeCount = 0;
    5052  outInfo.usedBytes = m_Size;
    5053  outInfo.unusedBytes = 0;
    5054  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
    5055  outInfo.unusedRangeSizeMin = UINT64_MAX;
    5056  outInfo.unusedRangeSizeMax = 0;
    5057  }
    5058 
    5059  void BlockAllocMap();
    5060  void BlockAllocUnmap();
    5061  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    5062  void DedicatedAllocUnmap(VmaAllocator hAllocator);
    5063 
    5064 #if VMA_STATS_STRING_ENABLED
    5065  uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
    5066  uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
    5067 
    5068  void InitBufferImageUsage(uint32_t bufferImageUsage)
    5069  {
    5070  VMA_ASSERT(m_BufferImageUsage == 0);
    5071  m_BufferImageUsage = bufferImageUsage;
    5072  }
    5073 
    5074  void PrintParameters(class VmaJsonWriter& json) const;
    5075 #endif
    5076 
    5077 private:
    5078  VkDeviceSize m_Alignment;
    5079  VkDeviceSize m_Size;
    5080  void* m_pUserData;
    5081  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    5082  uint8_t m_Type; // ALLOCATION_TYPE
    5083  uint8_t m_SuballocationType; // VmaSuballocationType
    5084  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    5085  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
    5086  uint8_t m_MapCount;
    5087  uint8_t m_Flags; // enum FLAGS
    5088 
    5089  // Allocation out of VmaDeviceMemoryBlock.
    5090  struct BlockAllocation
    5091  {
    5092  VmaPool m_hPool; // Null if belongs to general memory.
    5093  VmaDeviceMemoryBlock* m_Block;
    5094  VkDeviceSize m_Offset;
    5095  bool m_CanBecomeLost;
    5096  };
    5097 
    5098  // Allocation for an object that has its own private VkDeviceMemory.
    5099  struct DedicatedAllocation
    5100  {
    5101  uint32_t m_MemoryTypeIndex;
    5102  VkDeviceMemory m_hMemory;
    5103  void* m_pMappedData; // Not null means memory is mapped.
    5104  };
    5105 
    5106  union
    5107  {
    5108  // Allocation out of VmaDeviceMemoryBlock.
    5109  BlockAllocation m_BlockAllocation;
    5110  // Allocation for an object that has its own private VkDeviceMemory.
    5111  DedicatedAllocation m_DedicatedAllocation;
    5112  };
    5113 
    5114 #if VMA_STATS_STRING_ENABLED
    5115  uint32_t m_CreationFrameIndex;
    5116  uint32_t m_BufferImageUsage; // 0 if unknown.
    5117 #endif
    5118 
    5119  void FreeUserDataString(VmaAllocator hAllocator);
    5120 };
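// Editorial note: an illustrative sketch (not part of the library) of the
// m_MapCount packing documented above: bit 0x80 (MAP_COUNT_FLAG_PERSISTENT_MAP)
// marks an allocation created with VMA_ALLOCATION_CREATE_MAPPED_BIT, while bits
// with mask 0x7F count vmaMapMemory()/vmaUnmapMemory() nesting.
#if 0
static bool SketchIsPersistentlyMapped(uint8_t mapCount)
{
    return (mapCount & 0x80) != 0; // MAP_COUNT_FLAG_PERSISTENT_MAP
}
static uint8_t SketchUserMapCount(uint8_t mapCount)
{
    return mapCount & 0x7F;        // Explicit map nesting depth.
}
#endif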
    5121 
    5122 /*
    5123 Represents a region of a VmaDeviceMemoryBlock that is either assigned (and
    5124 returned as an allocated memory block) or free.
    5125 */
    5126 struct VmaSuballocation
    5127 {
    5128  VkDeviceSize offset;
    5129  VkDeviceSize size;
    5130  VmaAllocation hAllocation;
    5131  VmaSuballocationType type;
    5132 };
    5133 
    5134 // Comparator for offsets.
    5135 struct VmaSuballocationOffsetLess
    5136 {
    5137  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    5138  {
    5139  return lhs.offset < rhs.offset;
    5140  }
    5141 };
    5142 struct VmaSuballocationOffsetGreater
    5143 {
    5144  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    5145  {
    5146  return lhs.offset > rhs.offset;
    5147  }
    5148 };
    5149 
    5150 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
    5151 
    5152 // Cost of making one additional allocation lost, expressed in equivalent bytes.
    5153 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
    5154 
    5155 /*
    5156 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
    5157 
    5158 If canMakeOtherLost was false:
    5159 - item points to a FREE suballocation.
    5160 - itemsToMakeLostCount is 0.
    5161 
    5162 If canMakeOtherLost was true:
    5163 - item points to the first of a sequence of suballocations, which are either FREE,
    5164  or point to VmaAllocations that can become lost.
    5165 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
    5166  the requested allocation to succeed.
    5167 */
    5168 struct VmaAllocationRequest
    5169 {
    5170  VkDeviceSize offset;
    5171  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
    5172  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
    5173  VmaSuballocationList::iterator item;
    5174  size_t itemsToMakeLostCount;
    5175  void* customData;
    5176 
    5177  VkDeviceSize CalcCost() const
    5178  {
    5179  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    5180  }
    5181 };
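// Editorial note: a worked example (not part of the library) of CalcCost().
// With VMA_LOST_ALLOCATION_COST = 1048576, a request overlapping 262144 bytes
// of existing allocations and making 2 of them lost costs
// 262144 + 2 * 1048576 = 2359296 equivalent bytes; lower-cost placements win
// when candidate allocation requests are compared.
#if 0
VmaAllocationRequest sketchRequest = {};
sketchRequest.sumItemSize = 262144;      // Bytes of overlapped allocations.
sketchRequest.itemsToMakeLostCount = 2;  // Allocations to sacrifice.
// sketchRequest.CalcCost() == 2359296
#endif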
    5182 
    5183 /*
    5184 Data structure used for bookkeeping of allocations and unused ranges of memory
    5185 in a single VkDeviceMemory block.
    5186 */
    5187 class VmaBlockMetadata
    5188 {
    5189 public:
    5190  VmaBlockMetadata(VmaAllocator hAllocator);
    5191  virtual ~VmaBlockMetadata() { }
    5192  virtual void Init(VkDeviceSize size) { m_Size = size; }
    5193 
    5194  // Validates all data structures inside this object. If not valid, returns false.
    5195  virtual bool Validate() const = 0;
    5196  VkDeviceSize GetSize() const { return m_Size; }
    5197  virtual size_t GetAllocationCount() const = 0;
    5198  virtual VkDeviceSize GetSumFreeSize() const = 0;
    5199  virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
    5200  // Returns true if this block is empty - contains only a single free suballocation.
    5201  virtual bool IsEmpty() const = 0;
    5202 
    5203  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
    5204  // Shouldn't modify blockCount.
    5205  virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;
    5206 
    5207 #if VMA_STATS_STRING_ENABLED
    5208  virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
    5209 #endif
    5210 
    5211  // Tries to find a place for a suballocation with the given parameters inside this block.
    5212  // If succeeded, fills pAllocationRequest and returns true.
    5213  // If failed, returns false.
    5214  virtual bool CreateAllocationRequest(
    5215  uint32_t currentFrameIndex,
    5216  uint32_t frameInUseCount,
    5217  VkDeviceSize bufferImageGranularity,
    5218  VkDeviceSize allocSize,
    5219  VkDeviceSize allocAlignment,
    5220  bool upperAddress,
    5221  VmaSuballocationType allocType,
    5222  bool canMakeOtherLost,
    5223  // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.
    5224  uint32_t strategy,
    5225  VmaAllocationRequest* pAllocationRequest) = 0;
    5226 
    5227  virtual bool MakeRequestedAllocationsLost(
    5228  uint32_t currentFrameIndex,
    5229  uint32_t frameInUseCount,
    5230  VmaAllocationRequest* pAllocationRequest) = 0;
    5231 
    5232  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;
    5233 
    5234  virtual VkResult CheckCorruption(const void* pBlockData) = 0;
    5235 
    5236  // Makes actual allocation based on request. Request must already be checked and valid.
    5237  virtual void Alloc(
    5238  const VmaAllocationRequest& request,
    5239  VmaSuballocationType type,
    5240  VkDeviceSize allocSize,
    5241  bool upperAddress,
    5242  VmaAllocation hAllocation) = 0;
    5243 
    5244  // Frees the suballocation assigned to the given memory region.
    5245  virtual void Free(const VmaAllocation allocation) = 0;
    5246  virtual void FreeAtOffset(VkDeviceSize offset) = 0;
    5247 
    5248  // Tries to resize (grow or shrink) space for the given allocation, in place.
    5249  virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize) { return false; }
    5250 
    5251 protected:
    5252  const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
    5253 
    5254 #if VMA_STATS_STRING_ENABLED
    5255  void PrintDetailedMap_Begin(class VmaJsonWriter& json,
    5256  VkDeviceSize unusedBytes,
    5257  size_t allocationCount,
    5258  size_t unusedRangeCount) const;
    5259  void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
    5260  VkDeviceSize offset,
    5261  VmaAllocation hAllocation) const;
    5262  void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
    5263  VkDeviceSize offset,
    5264  VkDeviceSize size) const;
    5265  void PrintDetailedMap_End(class VmaJsonWriter& json) const;
    5266 #endif
    5267 
    5268 private:
    5269  VkDeviceSize m_Size;
    5270  const VkAllocationCallbacks* m_pAllocationCallbacks;
    5271 };
    5272 
    5273 #define VMA_VALIDATE(cond) do { if(!(cond)) { \
    5274  VMA_ASSERT(0 && "Validation failed: " #cond); \
    5275  return false; \
    5276  } } while(false)
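// Editorial note: an illustrative sketch (not part of the library) of how
// VMA_VALIDATE is used inside the bool-returning Validate() overrides below:
// on failure it asserts and returns false from the enclosing function, so
// validation stops at the first broken invariant.
#if 0
static bool SketchValidate(VkDeviceSize offset, VkDeviceSize size, VkDeviceSize blockSize)
{
    VMA_VALIDATE(offset + size <= blockSize); // Asserts and returns false if violated.
    return true;
}
#endif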
    5277 
    5278 class VmaBlockMetadata_Generic : public VmaBlockMetadata
    5279 {
    5280  VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
    5281 public:
    5282  VmaBlockMetadata_Generic(VmaAllocator hAllocator);
    5283  virtual ~VmaBlockMetadata_Generic();
    5284  virtual void Init(VkDeviceSize size);
    5285 
    5286  virtual bool Validate() const;
    5287  virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
    5288  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    5289  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    5290  virtual bool IsEmpty() const;
    5291 
    5292  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    5293  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
    5294 
    5295 #if VMA_STATS_STRING_ENABLED
    5296  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
    5297 #endif
    5298 
    5299  virtual bool CreateAllocationRequest(
    5300  uint32_t currentFrameIndex,
    5301  uint32_t frameInUseCount,
    5302  VkDeviceSize bufferImageGranularity,
    5303  VkDeviceSize allocSize,
    5304  VkDeviceSize allocAlignment,
    5305  bool upperAddress,
    5306  VmaSuballocationType allocType,
    5307  bool canMakeOtherLost,
    5308  uint32_t strategy,
    5309  VmaAllocationRequest* pAllocationRequest);
    5310 
    5311  virtual bool MakeRequestedAllocationsLost(
    5312  uint32_t currentFrameIndex,
    5313  uint32_t frameInUseCount,
    5314  VmaAllocationRequest* pAllocationRequest);
    5315 
    5316  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
    5317 
    5318  virtual VkResult CheckCorruption(const void* pBlockData);
    5319 
    5320  virtual void Alloc(
    5321  const VmaAllocationRequest& request,
    5322  VmaSuballocationType type,
    5323  VkDeviceSize allocSize,
    5324  bool upperAddress,
    5325  VmaAllocation hAllocation);
    5326 
    5327  virtual void Free(const VmaAllocation allocation);
    5328  virtual void FreeAtOffset(VkDeviceSize offset);
    5329 
    5330  virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize);
    5331 
    5332  ////////////////////////////////////////////////////////////////////////////////
    5333  // For defragmentation
    5334 
    5335  bool IsBufferImageGranularityConflictPossible(
    5336  VkDeviceSize bufferImageGranularity,
    5337  VmaSuballocationType& inOutPrevSuballocType) const;
    5338 
    5339 private:
    5340  friend class VmaDefragmentationAlgorithm_Generic;
    5341  friend class VmaDefragmentationAlgorithm_Fast;
    5342 
    5343  uint32_t m_FreeCount;
    5344  VkDeviceSize m_SumFreeSize;
    5345  VmaSuballocationList m_Suballocations;
    5346  // Suballocations that are free and have size greater than a certain threshold.
    5347  // Sorted by size, ascending.
    5348  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
    5349 
    5350  bool ValidateFreeSuballocationList() const;
    5351 
    5352  // Checks if a requested suballocation with the given parameters can be placed at the given suballocItem.
    5353  // If yes, fills pOffset and returns true. If no, returns false.
    5354  bool CheckAllocation(
    5355  uint32_t currentFrameIndex,
    5356  uint32_t frameInUseCount,
    5357  VkDeviceSize bufferImageGranularity,
    5358  VkDeviceSize allocSize,
    5359  VkDeviceSize allocAlignment,
    5360  VmaSuballocationType allocType,
    5361  VmaSuballocationList::const_iterator suballocItem,
    5362  bool canMakeOtherLost,
    5363  VkDeviceSize* pOffset,
    5364  size_t* itemsToMakeLostCount,
    5365  VkDeviceSize* pSumFreeSize,
    5366  VkDeviceSize* pSumItemSize) const;
    5367  // Given a free suballocation, merges it with the following one, which must also be free.
    5368  void MergeFreeWithNext(VmaSuballocationList::iterator item);
    5369  // Releases given suballocation, making it free.
    5370  // Merges it with adjacent free suballocations if applicable.
    5371  // Returns iterator to new free suballocation at this place.
    5372  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    5373  // Given a free suballocation, inserts it into the sorted list
    5374  // m_FreeSuballocationsBySize if it's suitable.
    5375  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    5376  // Given a free suballocation, removes it from the sorted list
    5377  // m_FreeSuballocationsBySize if it's suitable.
    5378  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
    5379 };
    5380 
    5381 /*
    5382 Allocations and their references in internal data structure look like this:
    5383 
    5384 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
    5385 
    5386  0 +-------+
    5387  | |
    5388  | |
    5389  | |
    5390  +-------+
    5391  | Alloc | 1st[m_1stNullItemsBeginCount]
    5392  +-------+
    5393  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    5394  +-------+
    5395  | ... |
    5396  +-------+
    5397  | Alloc | 1st[1st.size() - 1]
    5398  +-------+
    5399  | |
    5400  | |
    5401  | |
    5402 GetSize() +-------+
    5403 
    5404 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
    5405 
    5406  0 +-------+
    5407  | Alloc | 2nd[0]
    5408  +-------+
    5409  | Alloc | 2nd[1]
    5410  +-------+
    5411  | ... |
    5412  +-------+
    5413  | Alloc | 2nd[2nd.size() - 1]
    5414  +-------+
    5415  | |
    5416  | |
    5417  | |
    5418  +-------+
    5419  | Alloc | 1st[m_1stNullItemsBeginCount]
    5420  +-------+
    5421  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    5422  +-------+
    5423  | ... |
    5424  +-------+
    5425  | Alloc | 1st[1st.size() - 1]
    5426  +-------+
    5427  | |
    5428 GetSize() +-------+
    5429 
    5430 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
    5431 
    5432  0 +-------+
    5433  | |
    5434  | |
    5435  | |
    5436  +-------+
    5437  | Alloc | 1st[m_1stNullItemsBeginCount]
    5438  +-------+
    5439  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    5440  +-------+
    5441  | ... |
    5442  +-------+
    5443  | Alloc | 1st[1st.size() - 1]
    5444  +-------+
    5445  | |
    5446  | |
    5447  | |
    5448  +-------+
    5449  | Alloc | 2nd[2nd.size() - 1]
    5450  +-------+
    5451  | ... |
    5452  +-------+
    5453  | Alloc | 2nd[1]
    5454  +-------+
    5455  | Alloc | 2nd[0]
    5456 GetSize() +-------+
    5457 
    5458 */
    5459 class VmaBlockMetadata_Linear : public VmaBlockMetadata
    5460 {
    5461  VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
    5462 public:
    5463  VmaBlockMetadata_Linear(VmaAllocator hAllocator);
    5464  virtual ~VmaBlockMetadata_Linear();
    5465  virtual void Init(VkDeviceSize size);
    5466 
    5467  virtual bool Validate() const;
    5468  virtual size_t GetAllocationCount() const;
    5469  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    5470  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    5471  virtual bool IsEmpty() const { return GetAllocationCount() == 0; }
    5472 
    5473  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    5474  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
    5475 
    5476 #if VMA_STATS_STRING_ENABLED
    5477  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
    5478 #endif
    5479 
    5480  virtual bool CreateAllocationRequest(
    5481  uint32_t currentFrameIndex,
    5482  uint32_t frameInUseCount,
    5483  VkDeviceSize bufferImageGranularity,
    5484  VkDeviceSize allocSize,
    5485  VkDeviceSize allocAlignment,
    5486  bool upperAddress,
    5487  VmaSuballocationType allocType,
    5488  bool canMakeOtherLost,
    5489  uint32_t strategy,
    5490  VmaAllocationRequest* pAllocationRequest);
    5491 
    5492  virtual bool MakeRequestedAllocationsLost(
    5493  uint32_t currentFrameIndex,
    5494  uint32_t frameInUseCount,
    5495  VmaAllocationRequest* pAllocationRequest);
    5496 
    5497  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
    5498 
    5499  virtual VkResult CheckCorruption(const void* pBlockData);
    5500 
    5501  virtual void Alloc(
    5502  const VmaAllocationRequest& request,
    5503  VmaSuballocationType type,
    5504  VkDeviceSize allocSize,
    5505  bool upperAddress,
    5506  VmaAllocation hAllocation);
    5507 
    5508  virtual void Free(const VmaAllocation allocation);
    5509  virtual void FreeAtOffset(VkDeviceSize offset);
    5510 
    5511 private:
    5512  /*
    5513  There are two suballocation vectors, used in ping-pong way.
    5514  The one with index m_1stVectorIndex is called 1st.
    5515  The one with index (m_1stVectorIndex ^ 1) is called 2nd.
    5516  2nd can be non-empty only when 1st is not empty.
    5517  When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
    5518  */
    5519  typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;
    5520 
    5521  enum SECOND_VECTOR_MODE
    5522  {
    5523  SECOND_VECTOR_EMPTY,
    5524  /*
    5525  Suballocations in the 2nd vector are created later than the ones in the 1st,
    5526  but they all have smaller offsets.
    5527  */
    5528  SECOND_VECTOR_RING_BUFFER,
    5529  /*
    5530  Suballocations in the 2nd vector form the upper side of a double stack.
    5531  They all have offsets higher than those in the 1st vector.
    5532  The top of this stack corresponds to smaller offsets but higher indices in this vector.
    5533  */
    5534  SECOND_VECTOR_DOUBLE_STACK,
    5535  };
    5536 
    5537  VkDeviceSize m_SumFreeSize;
    5538  SuballocationVectorType m_Suballocations0, m_Suballocations1;
    5539  uint32_t m_1stVectorIndex;
    5540  SECOND_VECTOR_MODE m_2ndVectorMode;
    5541 
    5542  SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    5543  SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
    5544  const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    5545  const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
    5546 
    5547  // Number of items in 1st vector with hAllocation = null at the beginning.
    5548  size_t m_1stNullItemsBeginCount;
    5549  // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
    5550  size_t m_1stNullItemsMiddleCount;
    5551  // Number of items in 2nd vector with hAllocation = null.
    5552  size_t m_2ndNullItemsCount;
    5553 
    5554  bool ShouldCompact1st() const;
    5555  void CleanupAfterFree();
    5556 };
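// Editorial note: an illustrative sketch (not part of the library) of the
// ping-pong indexing used by AccessSuballocations1st/2nd above: with two
// vectors and a single index, the "1st" and "2nd" roles swap by flipping
// m_1stVectorIndex, without copying any suballocation data.
#if 0
static void PingPongSketch()
{
    int vectors[2] = { 0, 0 };
    uint32_t firstVectorIndex = 0;
    int& first  = vectors[firstVectorIndex];      // Plays the role of "1st".
    int& second = vectors[firstVectorIndex ^ 1];  // Plays the role of "2nd".
    firstVectorIndex ^= 1; // Swap roles: the previous 2nd becomes the new 1st.
    (void)first; (void)second;
}
#endif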
    5557 
    5558 /*
    5559 - GetSize() is the original size of the allocated memory block.
    5560 - m_UsableSize is this size aligned down to a power of two.
    5561  All allocations and calculations happen relative to m_UsableSize.
    5562 - GetUnusableSize() is the difference between them.
    5563  It is reported as a separate, unused range, not available for allocations.
    5564 
    5565 The node at level 0 has size = m_UsableSize.
    5566 Each subsequent level contains nodes half the size of those on the previous level.
    5567 m_LevelCount is the maximum number of levels to use in the current object.
    5568 */
    5569 class VmaBlockMetadata_Buddy : public VmaBlockMetadata
    5570 {
    5571  VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
    5572 public:
    5573  VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
    5574  virtual ~VmaBlockMetadata_Buddy();
    5575  virtual void Init(VkDeviceSize size);
    5576 
    5577  virtual bool Validate() const;
    5578  virtual size_t GetAllocationCount() const { return m_AllocationCount; }
    5579  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
    5580  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    5581  virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
    5582 
    5583  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    5584  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
    5585 
    5586 #if VMA_STATS_STRING_ENABLED
    5587  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
    5588 #endif
    5589 
    5590  virtual bool CreateAllocationRequest(
    5591  uint32_t currentFrameIndex,
    5592  uint32_t frameInUseCount,
    5593  VkDeviceSize bufferImageGranularity,
    5594  VkDeviceSize allocSize,
    5595  VkDeviceSize allocAlignment,
    5596  bool upperAddress,
    5597  VmaSuballocationType allocType,
    5598  bool canMakeOtherLost,
    5599  uint32_t strategy,
    5600  VmaAllocationRequest* pAllocationRequest);
    5601 
    5602  virtual bool MakeRequestedAllocationsLost(
    5603  uint32_t currentFrameIndex,
    5604  uint32_t frameInUseCount,
    5605  VmaAllocationRequest* pAllocationRequest);
    5606 
    5607  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
    5608 
    5609  virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
    5610 
    5611  virtual void Alloc(
    5612  const VmaAllocationRequest& request,
    5613  VmaSuballocationType type,
    5614  VkDeviceSize allocSize,
    5615  bool upperAddress,
    5616  VmaAllocation hAllocation);
    5617 
    5618  virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
    5619  virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
    5620 
    5621 private:
    5622  static const VkDeviceSize MIN_NODE_SIZE = 32;
    5623  static const size_t MAX_LEVELS = 30;
    5624 
    5625  struct ValidationContext
    5626  {
    5627  size_t calculatedAllocationCount;
    5628  size_t calculatedFreeCount;
    5629  VkDeviceSize calculatedSumFreeSize;
    5630 
    5631  ValidationContext() :
    5632  calculatedAllocationCount(0),
    5633  calculatedFreeCount(0),
    5634  calculatedSumFreeSize(0) { }
    5635  };
    5636 
    5637  struct Node
    5638  {
    5639  VkDeviceSize offset;
    5640  enum TYPE
    5641  {
    5642  TYPE_FREE,
    5643  TYPE_ALLOCATION,
    5644  TYPE_SPLIT,
    5645  TYPE_COUNT
    5646  } type;
    5647  Node* parent;
    5648  Node* buddy;
    5649 
    5650  union
    5651  {
    5652  struct
    5653  {
    5654  Node* prev;
    5655  Node* next;
    5656  } free;
    5657  struct
    5658  {
    5659  VmaAllocation alloc;
    5660  } allocation;
    5661  struct
    5662  {
    5663  Node* leftChild;
    5664  } split;
    5665  };
    5666  };
    5667 
    5668  // Size of the memory block aligned down to a power of two.
    5669  VkDeviceSize m_UsableSize;
    5670  uint32_t m_LevelCount;
    5671 
    5672  Node* m_Root;
    5673  struct {
    5674  Node* front;
    5675  Node* back;
    5676  } m_FreeList[MAX_LEVELS];
    5677  // Number of nodes in the tree with type == TYPE_ALLOCATION.
    5678  size_t m_AllocationCount;
    5679  // Number of nodes in the tree with type == TYPE_FREE.
    5680  size_t m_FreeCount;
    5681  // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
    5682  VkDeviceSize m_SumFreeSize;
    5683 
    5684  VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
    5685  void DeleteNode(Node* node);
    5686  bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
    5687  uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
    5688  inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
    5689  // Alloc passed just for validation. Can be null.
    5690  void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
    5691  void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
    5692  // Adds node to the front of FreeList at given level.
    5693  // node->type must be FREE.
    5694  // node->free.prev, next can be undefined.
    5695  void AddToFreeListFront(uint32_t level, Node* node);
    5696  // Removes node from FreeList at given level.
    5697  // node->type must be FREE.
    5698  // node->free.prev, next stay untouched.
    5699  void RemoveFromFreeList(uint32_t level, Node* node);
    5700 
    5701 #if VMA_STATS_STRING_ENABLED
    5702  void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
    5703 #endif
    5704 };
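// Editorial note: an illustrative sketch (not part of the library) of the
// level <-> node size relation above: LevelToNodeSize(level) == m_UsableSize >> level,
// so for m_UsableSize = 1024 the node sizes are 1024, 512, 256, ... and an
// allocation of 200 bytes lands in a level-2 node of size 256 - the smallest
// node still large enough (clamping by m_LevelCount/MIN_NODE_SIZE omitted here).
#if 0
static uint32_t SketchAllocSizeToLevel(VkDeviceSize usableSize, VkDeviceSize allocSize)
{
    uint32_t level = 0;
    // Descend while the next (half-size) node still fits the allocation.
    while((usableSize >> (level + 1)) >= allocSize)
    {
        ++level;
    }
    return level; // SketchAllocSizeToLevel(1024, 200) == 2.
}
#endif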
    5705 
    5706 /*
    5707 Represents a single block of device memory (`VkDeviceMemory`) with all the
    5708 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
    5709 
    5710 Thread-safety: This class must be externally synchronized.
    5711 */
    5712 class VmaDeviceMemoryBlock
    5713 {
    5714  VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
    5715 public:
    5716  VmaBlockMetadata* m_pMetadata;
    5717 
    5718  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
    5719 
    5720  ~VmaDeviceMemoryBlock()
    5721  {
    5722  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
    5723  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    5724  }
    5725 
    5726  // Always call after construction.
    5727  void Init(
    5728  VmaAllocator hAllocator,
    5729  uint32_t newMemoryTypeIndex,
    5730  VkDeviceMemory newMemory,
    5731  VkDeviceSize newSize,
    5732  uint32_t id,
    5733  uint32_t algorithm);
    5734  // Always call before destruction.
    5735  void Destroy(VmaAllocator allocator);
    5736 
    5737  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
    5738  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    5739  uint32_t GetId() const { return m_Id; }
    5740  void* GetMappedData() const { return m_pMappedData; }
    5741 
    5742  // Validates all data structures inside this object. If not valid, returns false.
    5743  bool Validate() const;
    5744 
    5745  VkResult CheckCorruption(VmaAllocator hAllocator);
    5746 
    5747  // ppData can be null.
    5748  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
    5749  void Unmap(VmaAllocator hAllocator, uint32_t count);
    5750 
    5751  VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
    5752  VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
    5753 
    5754  VkResult BindBufferMemory(
    5755  const VmaAllocator hAllocator,
    5756  const VmaAllocation hAllocation,
    5757  VkBuffer hBuffer);
    5758  VkResult BindImageMemory(
    5759  const VmaAllocator hAllocator,
    5760  const VmaAllocation hAllocation,
    5761  VkImage hImage);
    5762 
    5763 private:
    5764  uint32_t m_MemoryTypeIndex;
    5765  uint32_t m_Id;
    5766  VkDeviceMemory m_hMemory;
    5767 
    5768  /*
    5769  Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
    5770  Also protects m_MapCount, m_pMappedData.
    5771  Allocations, deallocations, and any change in m_pMetadata are protected by the parent's VmaBlockVector::m_Mutex.
    5772  */
    5773  VMA_MUTEX m_Mutex;
    5774  uint32_t m_MapCount;
    5775  void* m_pMappedData;
    5776 };
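// Editorial note: an illustrative sketch (not part of the library) of the
// reference-counted mapping above; it assumes the usual behavior where only
// the 0 -> nonzero and nonzero -> 0 transitions of m_MapCount reach
// vkMapMemory/vkUnmapMemory, so nested maps of the same block are cheap.
#if 0
static void MapSketch(VmaAllocator hAllocator, VmaDeviceMemoryBlock& block)
{
    void* pData = VMA_NULL;
    block.Map(hAllocator, 1, &pData); // First map: vkMapMemory is called.
    block.Map(hAllocator, 1, &pData); // Nested map: reuses m_pMappedData.
    block.Unmap(hAllocator, 1);       // Still mapped by the outer caller.
    block.Unmap(hAllocator, 1);       // Last unmap: vkUnmapMemory is called.
}
#endif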
    5777 
    5778 struct VmaPointerLess
    5779 {
    5780  bool operator()(const void* lhs, const void* rhs) const
    5781  {
    5782  return lhs < rhs;
    5783  }
    5784 };
    5785 
    5786 struct VmaDefragmentationMove
    5787 {
    5788  size_t srcBlockIndex;
    5789  size_t dstBlockIndex;
    5790  VkDeviceSize srcOffset;
    5791  VkDeviceSize dstOffset;
    5792  VkDeviceSize size;
    5793 };
    5794 
    5795 class VmaDefragmentationAlgorithm;
    5796 
    5797 /*
    5798 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
    5799 Vulkan memory type.
    5800 
    5801 Synchronized internally with a mutex.
    5802 */
    5803 struct VmaBlockVector
    5804 {
    5805  VMA_CLASS_NO_COPY(VmaBlockVector)
    5806 public:
    5807  VmaBlockVector(
    5808  VmaAllocator hAllocator,
    5809  uint32_t memoryTypeIndex,
    5810  VkDeviceSize preferredBlockSize,
    5811  size_t minBlockCount,
    5812  size_t maxBlockCount,
    5813  VkDeviceSize bufferImageGranularity,
    5814  uint32_t frameInUseCount,
    5815  bool isCustomPool,
    5816  bool explicitBlockSize,
    5817  uint32_t algorithm);
    5818  ~VmaBlockVector();
    5819 
    5820  VkResult CreateMinBlocks();
    5821 
    5822  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    5823  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    5824  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    5825  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
    5826  uint32_t GetAlgorithm() const { return m_Algorithm; }
    5827 
    5828  void GetPoolStats(VmaPoolStats* pStats);
    5829 
    5830  bool IsEmpty() const { return m_Blocks.empty(); }
    5831  bool IsCorruptionDetectionEnabled() const;
    5832 
    5833  VkResult Allocate(
    5834  VmaPool hCurrentPool,
    5835  uint32_t currentFrameIndex,
    5836  VkDeviceSize size,
    5837  VkDeviceSize alignment,
    5838  const VmaAllocationCreateInfo& createInfo,
    5839  VmaSuballocationType suballocType,
    5840  size_t allocationCount,
    5841  VmaAllocation* pAllocations);
    5842 
    5843  void Free(
    5844  VmaAllocation hAllocation);
    5845 
    5846  // Adds statistics of this BlockVector to pStats.
    5847  void AddStats(VmaStats* pStats);
    5848 
    5849 #if VMA_STATS_STRING_ENABLED
    5850  void PrintDetailedMap(class VmaJsonWriter& json);
    5851 #endif
    5852 
    5853  void MakePoolAllocationsLost(
    5854  uint32_t currentFrameIndex,
    5855  size_t* pLostAllocationCount);
    5856  VkResult CheckCorruption();
    5857 
    5858  // Saves results in pCtx->res.
    5859  void Defragment(
    5860  class VmaBlockVectorDefragmentationContext* pCtx,
    5861  VmaDefragmentationStats* pStats,
    5862  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
    5863  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
    5864  VkCommandBuffer commandBuffer);
    5865  void DefragmentationEnd(
    5866  class VmaBlockVectorDefragmentationContext* pCtx,
    5867  VmaDefragmentationStats* pStats);
    5868 
5869  ////////////////////////////////////////////////////////////////////////////////
5870  // To be used only while the m_Mutex is locked. Used during defragmentation.
    5871 
    5872  size_t GetBlockCount() const { return m_Blocks.size(); }
    5873  VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
    5874  size_t CalcAllocationCount() const;
    5875  bool IsBufferImageGranularityConflictPossible() const;
    5876 
    5877 private:
    5878  friend class VmaDefragmentationAlgorithm_Generic;
    5879 
    5880  const VmaAllocator m_hAllocator;
    5881  const uint32_t m_MemoryTypeIndex;
    5882  const VkDeviceSize m_PreferredBlockSize;
    5883  const size_t m_MinBlockCount;
    5884  const size_t m_MaxBlockCount;
    5885  const VkDeviceSize m_BufferImageGranularity;
    5886  const uint32_t m_FrameInUseCount;
    5887  const bool m_IsCustomPool;
    5888  const bool m_ExplicitBlockSize;
    5889  const uint32_t m_Algorithm;
5890  /* There can be at most one memory block that is completely empty - a
5891  hysteresis to avoid the pessimistic case of alternating creation and
5892  destruction of a VkDeviceMemory. */
    5893  bool m_HasEmptyBlock;
    5894  VMA_RW_MUTEX m_Mutex;
    5895  // Incrementally sorted by sumFreeSize, ascending.
    5896  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
    5897  uint32_t m_NextBlockId;
    5898 
    5899  VkDeviceSize CalcMaxBlockSize() const;
    5900 
    5901  // Finds and removes given block from vector.
    5902  void Remove(VmaDeviceMemoryBlock* pBlock);
    5903 
5904  // Performs a single step in sorting m_Blocks. They may not be fully sorted
5905  // after this call.
    5906  void IncrementallySortBlocks();
    5907 
    5908  VkResult AllocatePage(
    5909  VmaPool hCurrentPool,
    5910  uint32_t currentFrameIndex,
    5911  VkDeviceSize size,
    5912  VkDeviceSize alignment,
    5913  const VmaAllocationCreateInfo& createInfo,
    5914  VmaSuballocationType suballocType,
    5915  VmaAllocation* pAllocation);
    5916 
5917  // To be used only without the CAN_MAKE_OTHER_LOST flag.
    5918  VkResult AllocateFromBlock(
    5919  VmaDeviceMemoryBlock* pBlock,
    5920  VmaPool hCurrentPool,
    5921  uint32_t currentFrameIndex,
    5922  VkDeviceSize size,
    5923  VkDeviceSize alignment,
    5924  VmaAllocationCreateFlags allocFlags,
    5925  void* pUserData,
    5926  VmaSuballocationType suballocType,
    5927  uint32_t strategy,
    5928  VmaAllocation* pAllocation);
    5929 
    5930  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
    5931 
    5932  // Saves result to pCtx->res.
    5933  void ApplyDefragmentationMovesCpu(
    5934  class VmaBlockVectorDefragmentationContext* pDefragCtx,
    5935  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves);
    5936  // Saves result to pCtx->res.
    5937  void ApplyDefragmentationMovesGpu(
    5938  class VmaBlockVectorDefragmentationContext* pDefragCtx,
    5939  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    5940  VkCommandBuffer commandBuffer);
    5941 
5942  /*
5943  Used during defragmentation. pDefragmentationStats is optional - it's an
5944  in/out parameter, updated with new data.
5945  */
    5946  void FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats);
    5947 };
    5948 
    5949 struct VmaPool_T
    5950 {
    5951  VMA_CLASS_NO_COPY(VmaPool_T)
    5952 public:
    5953  VmaBlockVector m_BlockVector;
    5954 
    5955  VmaPool_T(
    5956  VmaAllocator hAllocator,
    5957  const VmaPoolCreateInfo& createInfo,
    5958  VkDeviceSize preferredBlockSize);
    5959  ~VmaPool_T();
    5960 
    5961  uint32_t GetId() const { return m_Id; }
    5962  void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
    5963 
    5964 #if VMA_STATS_STRING_ENABLED
    5965  //void PrintDetailedMap(class VmaStringBuilder& sb);
    5966 #endif
    5967 
    5968 private:
    5969  uint32_t m_Id;
    5970 };
    5971 
    5972 /*
    5973 Performs defragmentation:
    5974 
    5975 - Updates `pBlockVector->m_pMetadata`.
    5976 - Updates allocations by calling ChangeBlockAllocation() or ChangeOffset().
    5977 - Does not move actual data, only returns requested moves as `moves`.
    5978 */
    5979 class VmaDefragmentationAlgorithm
    5980 {
    5981  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm)
    5982 public:
    5983  VmaDefragmentationAlgorithm(
    5984  VmaAllocator hAllocator,
    5985  VmaBlockVector* pBlockVector,
    5986  uint32_t currentFrameIndex) :
    5987  m_hAllocator(hAllocator),
    5988  m_pBlockVector(pBlockVector),
    5989  m_CurrentFrameIndex(currentFrameIndex)
    5990  {
    5991  }
    5992  virtual ~VmaDefragmentationAlgorithm()
    5993  {
    5994  }
    5995 
    5996  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) = 0;
    5997  virtual void AddAll() = 0;
    5998 
    5999  virtual VkResult Defragment(
    6000  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    6001  VkDeviceSize maxBytesToMove,
    6002  uint32_t maxAllocationsToMove) = 0;
    6003 
    6004  virtual VkDeviceSize GetBytesMoved() const = 0;
    6005  virtual uint32_t GetAllocationsMoved() const = 0;
    6006 
    6007 protected:
    6008  VmaAllocator const m_hAllocator;
    6009  VmaBlockVector* const m_pBlockVector;
    6010  const uint32_t m_CurrentFrameIndex;
    6011 
    6012  struct AllocationInfo
    6013  {
    6014  VmaAllocation m_hAllocation;
    6015  VkBool32* m_pChanged;
    6016 
    6017  AllocationInfo() :
    6018  m_hAllocation(VK_NULL_HANDLE),
    6019  m_pChanged(VMA_NULL)
    6020  {
    6021  }
    6022  AllocationInfo(VmaAllocation hAlloc, VkBool32* pChanged) :
    6023  m_hAllocation(hAlloc),
    6024  m_pChanged(pChanged)
    6025  {
    6026  }
    6027  };
    6028 };
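// Editorial sketch (hypothetical, not part of the library): a minimal no-op
// subclass showing the contract of the abstract interface above. A real
// algorithm appends VmaDefragmentationMove records to 'moves' while staying
// within maxBytesToMove / maxAllocationsToMove.
class VmaExampleDefragmentationAlgorithm_Noop : public VmaDefragmentationAlgorithm
{
public:
    VmaExampleDefragmentationAlgorithm_Noop(
        VmaAllocator hAllocator,
        VmaBlockVector* pBlockVector,
        uint32_t currentFrameIndex) :
        VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex)
    {
    }
    virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { /* Nothing to track. */ }
    virtual void AddAll() { }
    virtual VkResult Defragment(
        VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove)
    {
        return VK_SUCCESS; // Proposes no moves - always valid, never useful.
    }
    virtual VkDeviceSize GetBytesMoved() const { return 0; }
    virtual uint32_t GetAllocationsMoved() const { return 0; }
};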
    6029 
    6030 class VmaDefragmentationAlgorithm_Generic : public VmaDefragmentationAlgorithm
    6031 {
    6032  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Generic)
    6033 public:
    6034  VmaDefragmentationAlgorithm_Generic(
    6035  VmaAllocator hAllocator,
    6036  VmaBlockVector* pBlockVector,
    6037  uint32_t currentFrameIndex,
    6038  bool overlappingMoveSupported);
    6039  virtual ~VmaDefragmentationAlgorithm_Generic();
    6040 
    6041  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
    6042  virtual void AddAll() { m_AllAllocations = true; }
    6043 
    6044  virtual VkResult Defragment(
    6045  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    6046  VkDeviceSize maxBytesToMove,
    6047  uint32_t maxAllocationsToMove);
    6048 
    6049  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    6050  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
    6051 
    6052 private:
    6053  uint32_t m_AllocationCount;
    6054  bool m_AllAllocations;
    6055 
    6056  VkDeviceSize m_BytesMoved;
    6057  uint32_t m_AllocationsMoved;
    6058 
    6059  struct AllocationInfoSizeGreater
    6060  {
    6061  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
    6062  {
    6063  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
    6064  }
    6065  };
    6066 
    6067  struct AllocationInfoOffsetGreater
    6068  {
    6069  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
    6070  {
    6071  return lhs.m_hAllocation->GetOffset() > rhs.m_hAllocation->GetOffset();
    6072  }
    6073  };
    6074 
    6075  struct BlockInfo
    6076  {
    6077  size_t m_OriginalBlockIndex;
    6078  VmaDeviceMemoryBlock* m_pBlock;
    6079  bool m_HasNonMovableAllocations;
    6080  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
    6081 
    6082  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
    6083  m_OriginalBlockIndex(SIZE_MAX),
    6084  m_pBlock(VMA_NULL),
    6085  m_HasNonMovableAllocations(true),
    6086  m_Allocations(pAllocationCallbacks)
    6087  {
    6088  }
    6089 
    6090  void CalcHasNonMovableAllocations()
    6091  {
    6092  const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
    6093  const size_t defragmentAllocCount = m_Allocations.size();
    6094  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
    6095  }
    6096 
    6097  void SortAllocationsBySizeDescending()
    6098  {
    6099  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
    6100  }
    6101 
    6102  void SortAllocationsByOffsetDescending()
    6103  {
    6104  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoOffsetGreater());
    6105  }
    6106  };
    6107 
    6108  struct BlockPointerLess
    6109  {
    6110  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
    6111  {
    6112  return pLhsBlockInfo->m_pBlock < pRhsBlock;
    6113  }
    6114  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
    6115  {
    6116  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
    6117  }
    6118  };
    6119 
    6120  // 1. Blocks with some non-movable allocations go first.
    6121  // 2. Blocks with smaller sumFreeSize go first.
    6122  struct BlockInfoCompareMoveDestination
    6123  {
    6124  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
    6125  {
    6126  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
    6127  {
    6128  return true;
    6129  }
    6130  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
    6131  {
    6132  return false;
    6133  }
    6134  if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
    6135  {
    6136  return true;
    6137  }
    6138  return false;
    6139  }
    6140  };
    6141 
    6142  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
    6143  BlockInfoVector m_Blocks;
    6144 
    6145  VkResult DefragmentRound(
    6146  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    6147  VkDeviceSize maxBytesToMove,
    6148  uint32_t maxAllocationsToMove);
    6149 
    6150  size_t CalcBlocksWithNonMovableCount() const;
    6151 
    6152  static bool MoveMakesSense(
    6153  size_t dstBlockIndex, VkDeviceSize dstOffset,
    6154  size_t srcBlockIndex, VkDeviceSize srcOffset);
    6155 };
    6156 
    6157 class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm
    6158 {
    6159  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Fast)
    6160 public:
    6161  VmaDefragmentationAlgorithm_Fast(
    6162  VmaAllocator hAllocator,
    6163  VmaBlockVector* pBlockVector,
    6164  uint32_t currentFrameIndex,
    6165  bool overlappingMoveSupported);
    6166  virtual ~VmaDefragmentationAlgorithm_Fast();
    6167 
    6168  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; }
    6169  virtual void AddAll() { m_AllAllocations = true; }
    6170 
    6171  virtual VkResult Defragment(
    6172  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    6173  VkDeviceSize maxBytesToMove,
    6174  uint32_t maxAllocationsToMove);
    6175 
    6176  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    6177  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
    6178 
    6179 private:
    6180  struct BlockInfo
    6181  {
    6182  size_t origBlockIndex;
    6183  };
    6184 
    6185  class FreeSpaceDatabase
    6186  {
    6187  public:
    6188  FreeSpaceDatabase()
    6189  {
    6190  FreeSpace s = {};
    6191  s.blockInfoIndex = SIZE_MAX;
    6192  for(size_t i = 0; i < MAX_COUNT; ++i)
    6193  {
    6194  m_FreeSpaces[i] = s;
    6195  }
    6196  }
    6197 
    6198  void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size)
    6199  {
    6200  if(size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    6201  {
    6202  return;
    6203  }
    6204 
6205  // Find the first invalid structure, or else the smallest one.
    6206  size_t bestIndex = SIZE_MAX;
    6207  for(size_t i = 0; i < MAX_COUNT; ++i)
    6208  {
    6209  // Empty structure.
    6210  if(m_FreeSpaces[i].blockInfoIndex == SIZE_MAX)
    6211  {
    6212  bestIndex = i;
    6213  break;
    6214  }
    6215  if(m_FreeSpaces[i].size < size &&
    6216  (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size))
    6217  {
    6218  bestIndex = i;
    6219  }
    6220  }
    6221 
    6222  if(bestIndex != SIZE_MAX)
    6223  {
    6224  m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex;
    6225  m_FreeSpaces[bestIndex].offset = offset;
    6226  m_FreeSpaces[bestIndex].size = size;
    6227  }
    6228  }
    6229 
    6230  bool Fetch(VkDeviceSize alignment, VkDeviceSize size,
    6231  size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset)
    6232  {
    6233  size_t bestIndex = SIZE_MAX;
    6234  VkDeviceSize bestFreeSpaceAfter = 0;
    6235  for(size_t i = 0; i < MAX_COUNT; ++i)
    6236  {
    6237  // Structure is valid.
    6238  if(m_FreeSpaces[i].blockInfoIndex != SIZE_MAX)
    6239  {
    6240  const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment);
    6241  // Allocation fits into this structure.
    6242  if(dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size)
    6243  {
    6244  const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) -
    6245  (dstOffset + size);
    6246  if(bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter)
    6247  {
    6248  bestIndex = i;
    6249  bestFreeSpaceAfter = freeSpaceAfter;
    6250  }
    6251  }
    6252  }
    6253  }
    6254 
    6255  if(bestIndex != SIZE_MAX)
    6256  {
    6257  outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex;
    6258  outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment);
    6259 
    6260  if(bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    6261  {
    6262  // Leave this structure for remaining empty space.
    6263  const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size;
    6264  m_FreeSpaces[bestIndex].offset += alignmentPlusSize;
    6265  m_FreeSpaces[bestIndex].size -= alignmentPlusSize;
    6266  }
    6267  else
    6268  {
    6269  // This structure becomes invalid.
    6270  m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX;
    6271  }
    6272 
    6273  return true;
    6274  }
    6275 
    6276  return false;
    6277  }
    6278 
    6279  private:
    6280  static const size_t MAX_COUNT = 4;
    6281 
    6282  struct FreeSpace
    6283  {
    6284  size_t blockInfoIndex; // SIZE_MAX means this structure is invalid.
    6285  VkDeviceSize offset;
    6286  VkDeviceSize size;
    6287  } m_FreeSpaces[MAX_COUNT];
    6288  };
    6289 
    6290  const bool m_OverlappingMoveSupported;
    6291 
    6292  uint32_t m_AllocationCount;
    6293  bool m_AllAllocations;
    6294 
    6295  VkDeviceSize m_BytesMoved;
    6296  uint32_t m_AllocationsMoved;
    6297 
    6298  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> > m_BlockInfos;
    6299 
    6300  void PreprocessMetadata();
    6301  void PostprocessMetadata();
    6302  void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc);
    6303 };
    6304 
    6305 struct VmaBlockDefragmentationContext
    6306 {
    6307  enum BLOCK_FLAG
    6308  {
    6309  BLOCK_FLAG_USED = 0x00000001,
    6310  };
    6311  uint32_t flags;
    6312  VkBuffer hBuffer;
    6313 
    6314  VmaBlockDefragmentationContext() :
    6315  flags(0),
    6316  hBuffer(VK_NULL_HANDLE)
    6317  {
    6318  }
    6319 };
    6320 
    6321 class VmaBlockVectorDefragmentationContext
    6322 {
    6323  VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext)
    6324 public:
    6325  VkResult res;
    6326  bool mutexLocked;
    6327  VmaVector< VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext> > blockContexts;
    6328 
    6329  VmaBlockVectorDefragmentationContext(
    6330  VmaAllocator hAllocator,
    6331  VmaPool hCustomPool, // Optional.
    6332  VmaBlockVector* pBlockVector,
    6333  uint32_t currFrameIndex,
    6334  uint32_t flags);
    6335  ~VmaBlockVectorDefragmentationContext();
    6336 
    6337  VmaPool GetCustomPool() const { return m_hCustomPool; }
    6338  VmaBlockVector* GetBlockVector() const { return m_pBlockVector; }
    6339  VmaDefragmentationAlgorithm* GetAlgorithm() const { return m_pAlgorithm; }
    6340 
    6341  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
    6342  void AddAll() { m_AllAllocations = true; }
    6343 
    6344  void Begin(bool overlappingMoveSupported);
    6345 
    6346 private:
    6347  const VmaAllocator m_hAllocator;
    6348  // Null if not from custom pool.
    6349  const VmaPool m_hCustomPool;
6350  // Redundant, kept for convenience so we don't have to fetch it from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors.
    6351  VmaBlockVector* const m_pBlockVector;
    6352  const uint32_t m_CurrFrameIndex;
    6353  const uint32_t m_AlgorithmFlags;
    6354  // Owner of this object.
    6355  VmaDefragmentationAlgorithm* m_pAlgorithm;
    6356 
    6357  struct AllocInfo
    6358  {
    6359  VmaAllocation hAlloc;
    6360  VkBool32* pChanged;
    6361  };
    6362  // Used between constructor and Begin.
    6363  VmaVector< AllocInfo, VmaStlAllocator<AllocInfo> > m_Allocations;
    6364  bool m_AllAllocations;
    6365 };
    6366 
    6367 struct VmaDefragmentationContext_T
    6368 {
    6369 private:
    6370  VMA_CLASS_NO_COPY(VmaDefragmentationContext_T)
    6371 public:
    6372  VmaDefragmentationContext_T(
    6373  VmaAllocator hAllocator,
    6374  uint32_t currFrameIndex,
    6375  uint32_t flags,
    6376  VmaDefragmentationStats* pStats);
    6377  ~VmaDefragmentationContext_T();
    6378 
    6379  void AddPools(uint32_t poolCount, VmaPool* pPools);
    6380  void AddAllocations(
    6381  uint32_t allocationCount,
    6382  VmaAllocation* pAllocations,
    6383  VkBool32* pAllocationsChanged);
    6384 
    6385  /*
    6386  Returns:
6387  - `VK_SUCCESS` if succeeded and the object can be destroyed immediately.
6388  - `VK_NOT_READY` if succeeded but the object must remain alive until vmaDefragmentationEnd().
6389  - Negative value if an error occurred and the object can be destroyed immediately.
    6390  */
    6391  VkResult Defragment(
    6392  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
    6393  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
    6394  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats);
    6395 
    6396 private:
    6397  const VmaAllocator m_hAllocator;
    6398  const uint32_t m_CurrFrameIndex;
    6399  const uint32_t m_Flags;
    6400  VmaDefragmentationStats* const m_pStats;
    6401  // Owner of these objects.
    6402  VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES];
    6403  // Owner of these objects.
    6404  VmaVector< VmaBlockVectorDefragmentationContext*, VmaStlAllocator<VmaBlockVectorDefragmentationContext*> > m_CustomPoolContexts;
    6405 };
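// Editorial sketch of the public API that wraps this context. It mirrors the
// return-value contract documented above: VK_NOT_READY means work is pending
// until vmaDefragmentationEnd(). 'allocator', 'allocs' and 'allocCount' are
// assumed to exist in the calling code; only CPU-side moves are requested.
static void VmaExample_DefragmentCpu(VmaAllocator allocator, VmaAllocation* allocs, uint32_t allocCount)
{
    VmaDefragmentationInfo2 info = {};
    info.allocationCount = allocCount;
    info.pAllocations = allocs;
    info.maxCpuBytesToMove = VK_WHOLE_SIZE;
    info.maxCpuAllocationsToMove = UINT32_MAX;

    VmaDefragmentationContext ctx = VK_NULL_HANDLE;
    VkResult res = vmaDefragmentationBegin(allocator, &info, VMA_NULL, &ctx);
    if(res >= 0) // VK_SUCCESS or VK_NOT_READY: the context exists and must be ended.
    {
        vmaDefragmentationEnd(allocator, ctx);
    }
}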
    6406 
    6407 #if VMA_RECORDING_ENABLED
    6408 
    6409 class VmaRecorder
    6410 {
    6411 public:
    6412  VmaRecorder();
    6413  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
    6414  void WriteConfiguration(
    6415  const VkPhysicalDeviceProperties& devProps,
    6416  const VkPhysicalDeviceMemoryProperties& memProps,
    6417  bool dedicatedAllocationExtensionEnabled);
    6418  ~VmaRecorder();
    6419 
    6420  void RecordCreateAllocator(uint32_t frameIndex);
    6421  void RecordDestroyAllocator(uint32_t frameIndex);
    6422  void RecordCreatePool(uint32_t frameIndex,
    6423  const VmaPoolCreateInfo& createInfo,
    6424  VmaPool pool);
    6425  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
    6426  void RecordAllocateMemory(uint32_t frameIndex,
    6427  const VkMemoryRequirements& vkMemReq,
    6428  const VmaAllocationCreateInfo& createInfo,
    6429  VmaAllocation allocation);
    6430  void RecordAllocateMemoryPages(uint32_t frameIndex,
    6431  const VkMemoryRequirements& vkMemReq,
    6432  const VmaAllocationCreateInfo& createInfo,
    6433  uint64_t allocationCount,
    6434  const VmaAllocation* pAllocations);
    6435  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
    6436  const VkMemoryRequirements& vkMemReq,
    6437  bool requiresDedicatedAllocation,
    6438  bool prefersDedicatedAllocation,
    6439  const VmaAllocationCreateInfo& createInfo,
    6440  VmaAllocation allocation);
    6441  void RecordAllocateMemoryForImage(uint32_t frameIndex,
    6442  const VkMemoryRequirements& vkMemReq,
    6443  bool requiresDedicatedAllocation,
    6444  bool prefersDedicatedAllocation,
    6445  const VmaAllocationCreateInfo& createInfo,
    6446  VmaAllocation allocation);
    6447  void RecordFreeMemory(uint32_t frameIndex,
    6448  VmaAllocation allocation);
    6449  void RecordFreeMemoryPages(uint32_t frameIndex,
    6450  uint64_t allocationCount,
    6451  const VmaAllocation* pAllocations);
    6452  void RecordResizeAllocation(
    6453  uint32_t frameIndex,
    6454  VmaAllocation allocation,
    6455  VkDeviceSize newSize);
    6456  void RecordSetAllocationUserData(uint32_t frameIndex,
    6457  VmaAllocation allocation,
    6458  const void* pUserData);
    6459  void RecordCreateLostAllocation(uint32_t frameIndex,
    6460  VmaAllocation allocation);
    6461  void RecordMapMemory(uint32_t frameIndex,
    6462  VmaAllocation allocation);
    6463  void RecordUnmapMemory(uint32_t frameIndex,
    6464  VmaAllocation allocation);
    6465  void RecordFlushAllocation(uint32_t frameIndex,
    6466  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    6467  void RecordInvalidateAllocation(uint32_t frameIndex,
    6468  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    6469  void RecordCreateBuffer(uint32_t frameIndex,
    6470  const VkBufferCreateInfo& bufCreateInfo,
    6471  const VmaAllocationCreateInfo& allocCreateInfo,
    6472  VmaAllocation allocation);
    6473  void RecordCreateImage(uint32_t frameIndex,
    6474  const VkImageCreateInfo& imageCreateInfo,
    6475  const VmaAllocationCreateInfo& allocCreateInfo,
    6476  VmaAllocation allocation);
    6477  void RecordDestroyBuffer(uint32_t frameIndex,
    6478  VmaAllocation allocation);
    6479  void RecordDestroyImage(uint32_t frameIndex,
    6480  VmaAllocation allocation);
    6481  void RecordTouchAllocation(uint32_t frameIndex,
    6482  VmaAllocation allocation);
    6483  void RecordGetAllocationInfo(uint32_t frameIndex,
    6484  VmaAllocation allocation);
    6485  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
    6486  VmaPool pool);
    6487  void RecordDefragmentationBegin(uint32_t frameIndex,
6488  const VmaDefragmentationInfo2& info,
6489  VmaDefragmentationContext ctx);
6490  void RecordDefragmentationEnd(uint32_t frameIndex,
6491  VmaDefragmentationContext ctx);
6492 
    6493 private:
    6494  struct CallParams
    6495  {
    6496  uint32_t threadId;
    6497  double time;
    6498  };
    6499 
    6500  class UserDataString
    6501  {
    6502  public:
    6503  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
    6504  const char* GetString() const { return m_Str; }
    6505 
    6506  private:
    6507  char m_PtrStr[17];
    6508  const char* m_Str;
    6509  };
    6510 
    6511  bool m_UseMutex;
    6512  VmaRecordFlags m_Flags;
    6513  FILE* m_File;
    6514  VMA_MUTEX m_FileMutex;
    6515  int64_t m_Freq;
    6516  int64_t m_StartCounter;
    6517 
    6518  void GetBasicParams(CallParams& outParams);
    6519 
    6520  // T must be a pointer type, e.g. VmaAllocation, VmaPool.
    6521  template<typename T>
    6522  void PrintPointerList(uint64_t count, const T* pItems)
    6523  {
    6524  if(count)
    6525  {
    6526  fprintf(m_File, "%p", pItems[0]);
    6527  for(uint64_t i = 1; i < count; ++i)
    6528  {
    6529  fprintf(m_File, " %p", pItems[i]);
    6530  }
    6531  }
    6532  }
    6533 
    6534  void PrintPointerList(uint64_t count, const VmaAllocation* pItems);
    6535  void Flush();
    6536 };
    6537 
    6538 #endif // #if VMA_RECORDING_ENABLED
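// Editorial sketch: the recorder above is driven through the public
// VmaAllocatorCreateInfo::pRecordSettings member, set at allocator creation.
// This only succeeds when compiled with VMA_RECORDING_ENABLED set to 1;
// the file path is an assumption of this example.
static VkResult VmaExample_CreateRecordingAllocator(
    VkPhysicalDevice physicalDevice, VkDevice device, VmaAllocator* pAllocator)
{
    VmaRecordSettings recordSettings = {};
    recordSettings.flags = VMA_RECORD_FLUSH_AFTER_CALL_BIT; // Survives a crash at some performance cost.
    recordSettings.pFilePath = "vma_replay.csv";

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;
    allocatorInfo.pRecordSettings = &recordSettings;
    return vmaCreateAllocator(&allocatorInfo, pAllocator);
}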
    6539 
    6540 // Main allocator object.
    6541 struct VmaAllocator_T
    6542 {
    6543  VMA_CLASS_NO_COPY(VmaAllocator_T)
    6544 public:
    6545  bool m_UseMutex;
    6546  bool m_UseKhrDedicatedAllocation;
    6547  VkDevice m_hDevice;
    6548  bool m_AllocationCallbacksSpecified;
    6549  VkAllocationCallbacks m_AllocationCallbacks;
    6550  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
    6551 
6552  // Number of bytes still available within the heap size limit, or VK_WHOLE_SIZE if that heap has no limit.
    6553  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
    6554  VMA_MUTEX m_HeapSizeLimitMutex;
    6555 
    6556  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
    6557  VkPhysicalDeviceMemoryProperties m_MemProps;
    6558 
    6559  // Default pools.
    6560  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
    6561 
    6562  // Each vector is sorted by memory (handle value).
    6563  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
    6564  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
    6565  VMA_RW_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
    6566 
    6567  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
    6568  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
    6569  ~VmaAllocator_T();
    6570 
    6571  const VkAllocationCallbacks* GetAllocationCallbacks() const
    6572  {
    6573  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
    6574  }
    6575  const VmaVulkanFunctions& GetVulkanFunctions() const
    6576  {
    6577  return m_VulkanFunctions;
    6578  }
    6579 
    6580  VkDeviceSize GetBufferImageGranularity() const
    6581  {
    6582  return VMA_MAX(
    6583  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
    6584  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
    6585  }
    6586 
    6587  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
    6588  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
    6589 
    6590  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
    6591  {
    6592  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
    6593  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
    6594  }
6595  // True when a specific memory type is HOST_VISIBLE but not HOST_COHERENT.
    6596  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
    6597  {
    6598  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
    6599  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    6600  }
6601  // Minimum alignment for all allocations in a specific memory type.
    6602  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
    6603  {
    6604  return IsMemoryTypeNonCoherent(memTypeIndex) ?
    6605  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
    6606  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
    6607  }
    6608 
    6609  bool IsIntegratedGpu() const
    6610  {
    6611  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
    6612  }
    6613 
    6614 #if VMA_RECORDING_ENABLED
    6615  VmaRecorder* GetRecorder() const { return m_pRecorder; }
    6616 #endif
    6617 
    6618  void GetBufferMemoryRequirements(
    6619  VkBuffer hBuffer,
    6620  VkMemoryRequirements& memReq,
    6621  bool& requiresDedicatedAllocation,
    6622  bool& prefersDedicatedAllocation) const;
    6623  void GetImageMemoryRequirements(
    6624  VkImage hImage,
    6625  VkMemoryRequirements& memReq,
    6626  bool& requiresDedicatedAllocation,
    6627  bool& prefersDedicatedAllocation) const;
    6628 
    6629  // Main allocation function.
    6630  VkResult AllocateMemory(
    6631  const VkMemoryRequirements& vkMemReq,
    6632  bool requiresDedicatedAllocation,
    6633  bool prefersDedicatedAllocation,
    6634  VkBuffer dedicatedBuffer,
    6635  VkImage dedicatedImage,
    6636  const VmaAllocationCreateInfo& createInfo,
    6637  VmaSuballocationType suballocType,
    6638  size_t allocationCount,
    6639  VmaAllocation* pAllocations);
    6640 
    6641  // Main deallocation function.
    6642  void FreeMemory(
    6643  size_t allocationCount,
    6644  const VmaAllocation* pAllocations);
    6645 
    6646  VkResult ResizeAllocation(
    6647  const VmaAllocation alloc,
    6648  VkDeviceSize newSize);
    6649 
    6650  void CalculateStats(VmaStats* pStats);
    6651 
    6652 #if VMA_STATS_STRING_ENABLED
    6653  void PrintDetailedMap(class VmaJsonWriter& json);
    6654 #endif
    6655 
    6656  VkResult DefragmentationBegin(
    6657  const VmaDefragmentationInfo2& info,
    6658  VmaDefragmentationStats* pStats,
    6659  VmaDefragmentationContext* pContext);
    6660  VkResult DefragmentationEnd(
    6661  VmaDefragmentationContext context);
    6662 
    6663  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
    6664  bool TouchAllocation(VmaAllocation hAllocation);
    6665 
    6666  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
    6667  void DestroyPool(VmaPool pool);
    6668  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
    6669 
    6670  void SetCurrentFrameIndex(uint32_t frameIndex);
    6671  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
    6672 
    6673  void MakePoolAllocationsLost(
    6674  VmaPool hPool,
    6675  size_t* pLostAllocationCount);
    6676  VkResult CheckPoolCorruption(VmaPool hPool);
    6677  VkResult CheckCorruption(uint32_t memoryTypeBits);
    6678 
    6679  void CreateLostAllocation(VmaAllocation* pAllocation);
    6680 
    6681  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
    6682  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
    6683 
    6684  VkResult Map(VmaAllocation hAllocation, void** ppData);
    6685  void Unmap(VmaAllocation hAllocation);
    6686 
    6687  VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
    6688  VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);
    6689 
    6690  void FlushOrInvalidateAllocation(
    6691  VmaAllocation hAllocation,
    6692  VkDeviceSize offset, VkDeviceSize size,
    6693  VMA_CACHE_OPERATION op);
    6694 
    6695  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
    6696 
    6697 private:
    6698  VkDeviceSize m_PreferredLargeHeapBlockSize;
    6699 
    6700  VkPhysicalDevice m_PhysicalDevice;
    6701  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
    6702 
    6703  VMA_RW_MUTEX m_PoolsMutex;
    6704  // Protected by m_PoolsMutex. Sorted by pointer value.
    6705  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
    6706  uint32_t m_NextPoolId;
    6707 
    6708  VmaVulkanFunctions m_VulkanFunctions;
    6709 
    6710 #if VMA_RECORDING_ENABLED
    6711  VmaRecorder* m_pRecorder;
    6712 #endif
    6713 
    6714  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
    6715 
    6716  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
    6717 
    6718  VkResult AllocateMemoryOfType(
    6719  VkDeviceSize size,
    6720  VkDeviceSize alignment,
    6721  bool dedicatedAllocation,
    6722  VkBuffer dedicatedBuffer,
    6723  VkImage dedicatedImage,
    6724  const VmaAllocationCreateInfo& createInfo,
    6725  uint32_t memTypeIndex,
    6726  VmaSuballocationType suballocType,
    6727  size_t allocationCount,
    6728  VmaAllocation* pAllocations);
    6729 
    6730  // Helper function only to be used inside AllocateDedicatedMemory.
    6731  VkResult AllocateDedicatedMemoryPage(
    6732  VkDeviceSize size,
    6733  VmaSuballocationType suballocType,
    6734  uint32_t memTypeIndex,
    6735  const VkMemoryAllocateInfo& allocInfo,
    6736  bool map,
    6737  bool isUserDataString,
    6738  void* pUserData,
    6739  VmaAllocation* pAllocation);
    6740 
6741  // Allocates and registers a new VkDeviceMemory specifically for dedicated allocations.
    6742  VkResult AllocateDedicatedMemory(
    6743  VkDeviceSize size,
    6744  VmaSuballocationType suballocType,
    6745  uint32_t memTypeIndex,
    6746  bool map,
    6747  bool isUserDataString,
    6748  void* pUserData,
    6749  VkBuffer dedicatedBuffer,
    6750  VkImage dedicatedImage,
    6751  size_t allocationCount,
    6752  VmaAllocation* pAllocations);
    6753 
6754  // Frees the given allocation as dedicated memory and unregisters it from the list of dedicated allocations.
    6755  void FreeDedicatedMemory(VmaAllocation allocation);
    6756 };
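// Editorial sketch: GetMemoryTypeMinAlignment() above widens allocation
// alignment to nonCoherentAtomSize on HOST_VISIBLE but non-HOST_COHERENT
// memory, which is what makes a whole-allocation flush like this one valid.
// 'allocator' and 'allocation' are assumed to exist in the calling code.
static void VmaExample_WriteAndFlush(VmaAllocator allocator, VmaAllocation allocation, const void* pSrc, size_t size)
{
    void* pDst = VMA_NULL;
    if(vmaMapMemory(allocator, allocation, &pDst) == VK_SUCCESS)
    {
        memcpy(pDst, pSrc, size);
        // A no-op on HOST_COHERENT memory, required on non-coherent memory.
        vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
        vmaUnmapMemory(allocator, allocation);
    }
}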
    6757 
6758 ////////////////////////////////////////////////////////////////////////////////
6759 // Memory allocation #2 after VmaAllocator_T definition
    6760 
    6761 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
    6762 {
    6763  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
    6764 }
    6765 
    6766 static void VmaFree(VmaAllocator hAllocator, void* ptr)
    6767 {
    6768  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
    6769 }
    6770 
    6771 template<typename T>
    6772 static T* VmaAllocate(VmaAllocator hAllocator)
    6773 {
    6774  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
    6775 }
    6776 
    6777 template<typename T>
    6778 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
    6779 {
    6780  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
    6781 }
    6782 
    6783 template<typename T>
    6784 static void vma_delete(VmaAllocator hAllocator, T* ptr)
    6785 {
    6786  if(ptr != VMA_NULL)
    6787  {
    6788  ptr->~T();
    6789  VmaFree(hAllocator, ptr);
    6790  }
    6791 }
    6792 
    6793 template<typename T>
    6794 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
    6795 {
    6796  if(ptr != VMA_NULL)
    6797  {
    6798  for(size_t i = count; i--; )
    6799  ptr[i].~T();
    6800  VmaFree(hAllocator, ptr);
    6801  }
    6802 }
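// Editorial sketch: the helpers above pair raw allocation with explicit
// construction and destruction. The usual in-file pattern combines
// VmaAllocate with placement new and tears the object down with vma_delete.
// VmaExamplePayload is a hypothetical type used only for illustration.
struct VmaExamplePayload
{
    uint32_t value;
    VmaExamplePayload() : value(0) { }
};

static void VmaExample_AllocateAndDelete(VmaAllocator hAllocator)
{
    // Placement new into memory that honors the allocator's callbacks.
    VmaExamplePayload* p = new(VmaAllocate<VmaExamplePayload>(hAllocator)) VmaExamplePayload();
    p->value = 42;
    vma_delete(hAllocator, p); // Runs ~VmaExamplePayload(), then VmaFree().
}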
    6803 
6804 ////////////////////////////////////////////////////////////////////////////////
6805 // VmaStringBuilder
    6806 
    6807 #if VMA_STATS_STRING_ENABLED
    6808 
    6809 class VmaStringBuilder
    6810 {
    6811 public:
    6812  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    6813  size_t GetLength() const { return m_Data.size(); }
    6814  const char* GetData() const { return m_Data.data(); }
    6815 
    6816  void Add(char ch) { m_Data.push_back(ch); }
    6817  void Add(const char* pStr);
    6818  void AddNewLine() { Add('\n'); }
    6819  void AddNumber(uint32_t num);
    6820  void AddNumber(uint64_t num);
    6821  void AddPointer(const void* ptr);
    6822 
    6823 private:
    6824  VmaVector< char, VmaStlAllocator<char> > m_Data;
    6825 };
    6826 
    6827 void VmaStringBuilder::Add(const char* pStr)
    6828 {
    6829  const size_t strLen = strlen(pStr);
    6830  if(strLen > 0)
    6831  {
    6832  const size_t oldCount = m_Data.size();
    6833  m_Data.resize(oldCount + strLen);
    6834  memcpy(m_Data.data() + oldCount, pStr, strLen);
    6835  }
    6836 }
    6837 
    6838 void VmaStringBuilder::AddNumber(uint32_t num)
    6839 {
    6840  char buf[11];
    6841  VmaUint32ToStr(buf, sizeof(buf), num);
    6842  Add(buf);
    6843 }
    6844 
    6845 void VmaStringBuilder::AddNumber(uint64_t num)
    6846 {
    6847  char buf[21];
    6848  VmaUint64ToStr(buf, sizeof(buf), num);
    6849  Add(buf);
    6850 }
    6851 
    6852 void VmaStringBuilder::AddPointer(const void* ptr)
    6853 {
    6854  char buf[21];
    6855  VmaPtrToStr(buf, sizeof(buf), ptr);
    6856  Add(buf);
    6857 }
    6858 
    6859 #endif // #if VMA_STATS_STRING_ENABLED
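#if VMA_STATS_STRING_ENABLED
// Editorial sketch: typical in-file use of VmaStringBuilder, the backing
// store for VmaJsonWriter below. 'allocator' is assumed to exist. Note the
// buffer is NOT null-terminated, so GetData() must be paired with GetLength().
static void VmaExample_BuildString(VmaAllocator allocator)
{
    VmaStringBuilder sb(allocator);
    sb.Add("Heap count: ");
    sb.AddNumber(allocator->GetMemoryHeapCount());
    sb.AddNewLine();
    const char* pStr = sb.GetData();   // Not null-terminated.
    const size_t len = sb.GetLength(); // Always carry the length along.
    (void)pStr; (void)len;
}
#endif // #if VMA_STATS_STRING_ENABLED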
    6860 
6861 ////////////////////////////////////////////////////////////////////////////////
6862 // VmaJsonWriter
    6863 
    6864 #if VMA_STATS_STRING_ENABLED
    6865 
    6866 class VmaJsonWriter
    6867 {
    6868  VMA_CLASS_NO_COPY(VmaJsonWriter)
    6869 public:
    6870  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    6871  ~VmaJsonWriter();
    6872 
    6873  void BeginObject(bool singleLine = false);
    6874  void EndObject();
    6875 
    6876  void BeginArray(bool singleLine = false);
    6877  void EndArray();
    6878 
    6879  void WriteString(const char* pStr);
    6880  void BeginString(const char* pStr = VMA_NULL);
    6881  void ContinueString(const char* pStr);
    6882  void ContinueString(uint32_t n);
    6883  void ContinueString(uint64_t n);
    6884  void ContinueString_Pointer(const void* ptr);
    6885  void EndString(const char* pStr = VMA_NULL);
    6886 
    6887  void WriteNumber(uint32_t n);
    6888  void WriteNumber(uint64_t n);
    6889  void WriteBool(bool b);
    6890  void WriteNull();
    6891 
    6892 private:
    6893  static const char* const INDENT;
    6894 
    6895  enum COLLECTION_TYPE
    6896  {
    6897  COLLECTION_TYPE_OBJECT,
    6898  COLLECTION_TYPE_ARRAY,
    6899  };
    6900  struct StackItem
    6901  {
    6902  COLLECTION_TYPE type;
    6903  uint32_t valueCount;
    6904  bool singleLineMode;
    6905  };
    6906 
    6907  VmaStringBuilder& m_SB;
    6908  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    6909  bool m_InsideString;
    6910 
    6911  void BeginValue(bool isString);
    6912  void WriteIndent(bool oneLess = false);
    6913 };
    6914 
6915 const char* const VmaJsonWriter::INDENT = "  ";
    6916 
    6917 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
    6918  m_SB(sb),
    6919  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    6920  m_InsideString(false)
    6921 {
    6922 }
    6923 
    6924 VmaJsonWriter::~VmaJsonWriter()
    6925 {
    6926  VMA_ASSERT(!m_InsideString);
    6927  VMA_ASSERT(m_Stack.empty());
    6928 }
    6929 
    6930 void VmaJsonWriter::BeginObject(bool singleLine)
    6931 {
    6932  VMA_ASSERT(!m_InsideString);
    6933 
    6934  BeginValue(false);
    6935  m_SB.Add('{');
    6936 
    6937  StackItem item;
    6938  item.type = COLLECTION_TYPE_OBJECT;
    6939  item.valueCount = 0;
    6940  item.singleLineMode = singleLine;
    6941  m_Stack.push_back(item);
    6942 }
    6943 
    6944 void VmaJsonWriter::EndObject()
    6945 {
    6946  VMA_ASSERT(!m_InsideString);
    6947 
    6948  WriteIndent(true);
    6949  m_SB.Add('}');
    6950 
    6951  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
    6952  m_Stack.pop_back();
    6953 }
    6954 
    6955 void VmaJsonWriter::BeginArray(bool singleLine)
    6956 {
    6957  VMA_ASSERT(!m_InsideString);
    6958 
    6959  BeginValue(false);
    6960  m_SB.Add('[');
    6961 
    6962  StackItem item;
    6963  item.type = COLLECTION_TYPE_ARRAY;
    6964  item.valueCount = 0;
    6965  item.singleLineMode = singleLine;
    6966  m_Stack.push_back(item);
    6967 }
    6968 
    6969 void VmaJsonWriter::EndArray()
    6970 {
    6971  VMA_ASSERT(!m_InsideString);
    6972 
    6973  WriteIndent(true);
    6974  m_SB.Add(']');
    6975 
    6976  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
    6977  m_Stack.pop_back();
    6978 }
    6979 
    6980 void VmaJsonWriter::WriteString(const char* pStr)
    6981 {
    6982  BeginString(pStr);
    6983  EndString();
    6984 }
    6985 
    6986 void VmaJsonWriter::BeginString(const char* pStr)
    6987 {
    6988  VMA_ASSERT(!m_InsideString);
    6989 
    6990  BeginValue(true);
    6991  m_SB.Add('"');
    6992  m_InsideString = true;
    6993  if(pStr != VMA_NULL && pStr[0] != '\0')
    6994  {
    6995  ContinueString(pStr);
    6996  }
    6997 }
    6998 
    6999 void VmaJsonWriter::ContinueString(const char* pStr)
    7000 {
    7001  VMA_ASSERT(m_InsideString);
    7002 
    7003  const size_t strLen = strlen(pStr);
    7004  for(size_t i = 0; i < strLen; ++i)
    7005  {
    7006  char ch = pStr[i];
    7007  if(ch == '\\')
    7008  {
    7009  m_SB.Add("\\\\");
    7010  }
    7011  else if(ch == '"')
    7012  {
    7013  m_SB.Add("\\\"");
    7014  }
    7015  else if(ch >= 32)
    7016  {
    7017  m_SB.Add(ch);
    7018  }
    7019  else switch(ch)
    7020  {
    7021  case '\b':
    7022  m_SB.Add("\\b");
    7023  break;
    7024  case '\f':
    7025  m_SB.Add("\\f");
    7026  break;
    7027  case '\n':
    7028  m_SB.Add("\\n");
    7029  break;
    7030  case '\r':
    7031  m_SB.Add("\\r");
    7032  break;
    7033  case '\t':
    7034  m_SB.Add("\\t");
    7035  break;
    7036  default:
    7037  VMA_ASSERT(0 && "Character not currently supported.");
    7038  break;
    7039  }
    7040  }
    7041 }
    7042 
    7043 void VmaJsonWriter::ContinueString(uint32_t n)
    7044 {
    7045  VMA_ASSERT(m_InsideString);
    7046  m_SB.AddNumber(n);
    7047 }
    7048 
    7049 void VmaJsonWriter::ContinueString(uint64_t n)
    7050 {
    7051  VMA_ASSERT(m_InsideString);
    7052  m_SB.AddNumber(n);
    7053 }
    7054 
    7055 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
    7056 {
    7057  VMA_ASSERT(m_InsideString);
    7058  m_SB.AddPointer(ptr);
    7059 }
    7060 
    7061 void VmaJsonWriter::EndString(const char* pStr)
    7062 {
    7063  VMA_ASSERT(m_InsideString);
    7064  if(pStr != VMA_NULL && pStr[0] != '\0')
    7065  {
    7066  ContinueString(pStr);
    7067  }
    7068  m_SB.Add('"');
    7069  m_InsideString = false;
    7070 }
    7071 
    7072 void VmaJsonWriter::WriteNumber(uint32_t n)
    7073 {
    7074  VMA_ASSERT(!m_InsideString);
    7075  BeginValue(false);
    7076  m_SB.AddNumber(n);
    7077 }
    7078 
    7079 void VmaJsonWriter::WriteNumber(uint64_t n)
    7080 {
    7081  VMA_ASSERT(!m_InsideString);
    7082  BeginValue(false);
    7083  m_SB.AddNumber(n);
    7084 }
    7085 
    7086 void VmaJsonWriter::WriteBool(bool b)
    7087 {
    7088  VMA_ASSERT(!m_InsideString);
    7089  BeginValue(false);
    7090  m_SB.Add(b ? "true" : "false");
    7091 }
    7092 
    7093 void VmaJsonWriter::WriteNull()
    7094 {
    7095  VMA_ASSERT(!m_InsideString);
    7096  BeginValue(false);
    7097  m_SB.Add("null");
    7098 }
    7099 
    7100 void VmaJsonWriter::BeginValue(bool isString)
    7101 {
    7102  if(!m_Stack.empty())
    7103  {
    7104  StackItem& currItem = m_Stack.back();
    7105  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    7106  currItem.valueCount % 2 == 0)
    7107  {
    7108  VMA_ASSERT(isString);
    7109  }
    7110 
    7111  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    7112  currItem.valueCount % 2 != 0)
    7113  {
    7114  m_SB.Add(": ");
    7115  }
    7116  else if(currItem.valueCount > 0)
    7117  {
    7118  m_SB.Add(", ");
    7119  WriteIndent();
    7120  }
    7121  else
    7122  {
    7123  WriteIndent();
    7124  }
    7125  ++currItem.valueCount;
    7126  }
    7127 }
    7128 
    7129 void VmaJsonWriter::WriteIndent(bool oneLess)
    7130 {
    7131  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
    7132  {
    7133  m_SB.AddNewLine();
    7134 
    7135  size_t count = m_Stack.size();
    7136  if(count > 0 && oneLess)
    7137  {
    7138  --count;
    7139  }
    7140  for(size_t i = 0; i < count; ++i)
    7141  {
    7142  m_SB.Add(INDENT);
    7143  }
    7144  }
    7145 }
    7146 
    7147 #endif // #if VMA_STATS_STRING_ENABLED
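#if VMA_STATS_STRING_ENABLED
// Editorial sketch: the writer above enforces JSON shape with assertions -
// inside an object, every even-positioned value must be a string (a key).
// This emits {"Count": 2, "Names": ["a", "b"]}. 'allocator' is assumed.
static void VmaExample_WriteJson(VmaAllocator allocator)
{
    VmaStringBuilder sb(allocator);
    VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
    json.BeginObject();
    json.WriteString("Count"); // Key: must be a string.
    json.WriteNumber(2u);      // Value.
    json.WriteString("Names");
    json.BeginArray(true);     // singleLine = true: no line breaks inside.
    json.WriteString("a");
    json.WriteString("b");
    json.EndArray();
    json.EndObject(); // The destructor asserts that the stack is empty.
}
#endif // #if VMA_STATS_STRING_ENABLED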
    7148 
7149 ////////////////////////////////////////////////////////////////////////////////
7150 
    7151 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
    7152 {
    7153  if(IsUserDataString())
    7154  {
    7155  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
    7156 
    7157  FreeUserDataString(hAllocator);
    7158 
    7159  if(pUserData != VMA_NULL)
    7160  {
    7161  const char* const newStrSrc = (char*)pUserData;
    7162  const size_t newStrLen = strlen(newStrSrc);
    7163  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
    7164  memcpy(newStrDst, newStrSrc, newStrLen + 1);
    7165  m_pUserData = newStrDst;
    7166  }
    7167  }
    7168  else
    7169  {
    7170  m_pUserData = pUserData;
    7171  }
    7172 }
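// Editorial sketch of the public entry point that reaches SetUserData().
// When the allocation was created with
// VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT, IsUserDataString() is true
// and the string is copied as above; otherwise the raw pointer is stored
// verbatim. 'allocator' and 'alloc' are assumed to exist in the calling code.
static void VmaExample_SetName(VmaAllocator allocator, VmaAllocation alloc)
{
    char name[] = "Mesh vertex buffer";
    vmaSetAllocationUserData(allocator, alloc, name);
    // With the copy-string bit set, 'name' may now safely go out of scope.
}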
    7173 
    7174 void VmaAllocation_T::ChangeBlockAllocation(
    7175  VmaAllocator hAllocator,
    7176  VmaDeviceMemoryBlock* block,
    7177  VkDeviceSize offset)
    7178 {
    7179  VMA_ASSERT(block != VMA_NULL);
    7180  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    7181 
    7182  // Move mapping reference counter from old block to new block.
    7183  if(block != m_BlockAllocation.m_Block)
    7184  {
    7185  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
    7186  if(IsPersistentMap())
    7187  ++mapRefCount;
    7188  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
    7189  block->Map(hAllocator, mapRefCount, VMA_NULL);
    7190  }
    7191 
    7192  m_BlockAllocation.m_Block = block;
    7193  m_BlockAllocation.m_Offset = offset;
    7194 }
    7195 
    7196 void VmaAllocation_T::ChangeSize(VkDeviceSize newSize)
    7197 {
    7198  VMA_ASSERT(newSize > 0);
    7199  m_Size = newSize;
    7200 }
    7201 
    7202 void VmaAllocation_T::ChangeOffset(VkDeviceSize newOffset)
    7203 {
    7204  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    7205  m_BlockAllocation.m_Offset = newOffset;
    7206 }
    7207 
    7208 VkDeviceSize VmaAllocation_T::GetOffset() const
    7209 {
    7210  switch(m_Type)
    7211  {
    7212  case ALLOCATION_TYPE_BLOCK:
    7213  return m_BlockAllocation.m_Offset;
    7214  case ALLOCATION_TYPE_DEDICATED:
    7215  return 0;
    7216  default:
    7217  VMA_ASSERT(0);
    7218  return 0;
    7219  }
    7220 }
    7221 
    7222 VkDeviceMemory VmaAllocation_T::GetMemory() const
    7223 {
    7224  switch(m_Type)
    7225  {
    7226  case ALLOCATION_TYPE_BLOCK:
    7227  return m_BlockAllocation.m_Block->GetDeviceMemory();
    7228  case ALLOCATION_TYPE_DEDICATED:
    7229  return m_DedicatedAllocation.m_hMemory;
    7230  default:
    7231  VMA_ASSERT(0);
    7232  return VK_NULL_HANDLE;
    7233  }
    7234 }
    7235 
    7236 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
    7237 {
    7238  switch(m_Type)
    7239  {
    7240  case ALLOCATION_TYPE_BLOCK:
    7241  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
    7242  case ALLOCATION_TYPE_DEDICATED:
    7243  return m_DedicatedAllocation.m_MemoryTypeIndex;
    7244  default:
    7245  VMA_ASSERT(0);
    7246  return UINT32_MAX;
    7247  }
    7248 }
    7249 
    7250 void* VmaAllocation_T::GetMappedData() const
    7251 {
    7252  switch(m_Type)
    7253  {
    7254  case ALLOCATION_TYPE_BLOCK:
    7255  if(m_MapCount != 0)
    7256  {
    7257  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
    7258  VMA_ASSERT(pBlockData != VMA_NULL);
    7259  return (char*)pBlockData + m_BlockAllocation.m_Offset;
    7260  }
    7261  else
    7262  {
    7263  return VMA_NULL;
    7264  }
    7265  break;
    7266  case ALLOCATION_TYPE_DEDICATED:
    7267  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
    7268  return m_DedicatedAllocation.m_pMappedData;
    7269  default:
    7270  VMA_ASSERT(0);
    7271  return VMA_NULL;
    7272  }
    7273 }
    7274 
    7275 bool VmaAllocation_T::CanBecomeLost() const
    7276 {
    7277  switch(m_Type)
    7278  {
    7279  case ALLOCATION_TYPE_BLOCK:
    7280  return m_BlockAllocation.m_CanBecomeLost;
    7281  case ALLOCATION_TYPE_DEDICATED:
    7282  return false;
    7283  default:
    7284  VMA_ASSERT(0);
    7285  return false;
    7286  }
    7287 }
    7288 
    7289 VmaPool VmaAllocation_T::GetPool() const
    7290 {
    7291  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    7292  return m_BlockAllocation.m_hPool;
    7293 }
    7294 
    7295 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    7296 {
    7297  VMA_ASSERT(CanBecomeLost());
    7298 
    7299  /*
    7300  Warning: This is a carefully designed algorithm.
    7301  Do not modify unless you really know what you're doing :)
    7302  */
    7303  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
    7304  for(;;)
    7305  {
    7306  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
    7307  {
    7308  VMA_ASSERT(0);
    7309  return false;
    7310  }
    7311  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
    7312  {
    7313  return false;
    7314  }
    7315  else // Last use time earlier than current time.
    7316  {
    7317  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
    7318  {
    7319  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
    7320  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
    7321  return true;
    7322  }
    7323  }
    7324  }
    7325 }
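// Editorial sketch of the per-frame protocol that feeds MakeLost(). An
// allocation created with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT must be
// touched in every frame it is used; once it has gone untouched for more
// than frameInUseCount frames, the loop above may retire it. 'allocator'
// and 'alloc' are assumed to exist in the calling code.
static bool VmaExample_UseLostCapableAllocation(VmaAllocator allocator, VmaAllocation alloc, uint32_t frameIndex)
{
    vmaSetCurrentFrameIndex(allocator, frameIndex);
    if(!vmaTouchAllocation(allocator, alloc))
    {
        // The allocation became lost: its memory may already belong to
        // another allocation, so it must be recreated before use.
        return false;
    }
    return true; // Still valid in this frame.
}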
    7326 
    7327 #if VMA_STATS_STRING_ENABLED
    7328 
7329 // These correspond to the values of enum VmaSuballocationType.
    7330 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    7331  "FREE",
    7332  "UNKNOWN",
    7333  "BUFFER",
    7334  "IMAGE_UNKNOWN",
    7335  "IMAGE_LINEAR",
    7336  "IMAGE_OPTIMAL",
    7337 };
    7338 
    7339 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
    7340 {
    7341  json.WriteString("Type");
    7342  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
    7343 
    7344  json.WriteString("Size");
    7345  json.WriteNumber(m_Size);
    7346 
    7347  if(m_pUserData != VMA_NULL)
    7348  {
    7349  json.WriteString("UserData");
    7350  if(IsUserDataString())
    7351  {
    7352  json.WriteString((const char*)m_pUserData);
    7353  }
    7354  else
    7355  {
    7356  json.BeginString();
    7357  json.ContinueString_Pointer(m_pUserData);
    7358  json.EndString();
    7359  }
    7360  }
    7361 
    7362  json.WriteString("CreationFrameIndex");
    7363  json.WriteNumber(m_CreationFrameIndex);
    7364 
    7365  json.WriteString("LastUseFrameIndex");
    7366  json.WriteNumber(GetLastUseFrameIndex());
    7367 
    7368  if(m_BufferImageUsage != 0)
    7369  {
    7370  json.WriteString("Usage");
    7371  json.WriteNumber(m_BufferImageUsage);
    7372  }
    7373 }
    7374 
    7375 #endif
    7376 
    7377 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
    7378 {
    7379  VMA_ASSERT(IsUserDataString());
    7380  if(m_pUserData != VMA_NULL)
    7381  {
    7382  char* const oldStr = (char*)m_pUserData;
    7383  const size_t oldStrLen = strlen(oldStr);
    7384  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
    7385  m_pUserData = VMA_NULL;
    7386  }
    7387 }
    7388 
    7389 void VmaAllocation_T::BlockAllocMap()
    7390 {
    7391  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    7392 
    7393  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    7394  {
    7395  ++m_MapCount;
    7396  }
    7397  else
    7398  {
    7399  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
    7400  }
    7401 }
    7402 
    7403 void VmaAllocation_T::BlockAllocUnmap()
    7404 {
    7405  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    7406 
    7407  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    7408  {
    7409  --m_MapCount;
    7410  }
    7411  else
    7412  {
    7413  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
    7414  }
    7415 }
    7416 
    7417 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
    7418 {
    7419  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
    7420 
    7421  if(m_MapCount != 0)
    7422  {
    7423  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    7424  {
    7425  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
    7426  *ppData = m_DedicatedAllocation.m_pMappedData;
    7427  ++m_MapCount;
    7428  return VK_SUCCESS;
    7429  }
    7430  else
    7431  {
    7432  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
    7433  return VK_ERROR_MEMORY_MAP_FAILED;
    7434  }
    7435  }
    7436  else
    7437  {
    7438  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
    7439  hAllocator->m_hDevice,
    7440  m_DedicatedAllocation.m_hMemory,
    7441  0, // offset
    7442  VK_WHOLE_SIZE,
    7443  0, // flags
    7444  ppData);
    7445  if(result == VK_SUCCESS)
    7446  {
    7447  m_DedicatedAllocation.m_pMappedData = *ppData;
    7448  m_MapCount = 1;
    7449  }
    7450  return result;
    7451  }
    7452 }
    7453 
    7454 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
    7455 {
    7456  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
    7457 
    7458  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    7459  {
    7460  --m_MapCount;
    7461  if(m_MapCount == 0)
    7462  {
    7463  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
    7464  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
    7465  hAllocator->m_hDevice,
    7466  m_DedicatedAllocation.m_hMemory);
    7467  }
    7468  }
    7469  else
    7470  {
    7471  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
    7472  }
    7473 }
    7474 
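// Illustrative usage sketch (hypothetical caller, not library code): dedicated
// allocations map lazily on the first request and cache the pointer, so nested
// map/unmap pairs are cheap and only the final unmap calls vkUnmapMemory.
//
//   void* p1 = VMA_NULL;
//   void* p2 = VMA_NULL;
//   alloc->DedicatedAllocMap(hAllocator, &p1);  // calls vkMapMemory, m_MapCount = 1
//   alloc->DedicatedAllocMap(hAllocator, &p2);  // returns cached pointer, m_MapCount = 2
//   VMA_ASSERT(p1 == p2);
//   alloc->DedicatedAllocUnmap(hAllocator);     // m_MapCount = 1, memory stays mapped
//   alloc->DedicatedAllocUnmap(hAllocator);     // m_MapCount = 0, calls vkUnmapMemory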
    7475 #if VMA_STATS_STRING_ENABLED
    7476 
    7477 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
    7478 {
    7479  json.BeginObject();
    7480 
    7481  json.WriteString("Blocks");
    7482  json.WriteNumber(stat.blockCount);
    7483 
    7484  json.WriteString("Allocations");
    7485  json.WriteNumber(stat.allocationCount);
    7486 
    7487  json.WriteString("UnusedRanges");
    7488  json.WriteNumber(stat.unusedRangeCount);
    7489 
    7490  json.WriteString("UsedBytes");
    7491  json.WriteNumber(stat.usedBytes);
    7492 
    7493  json.WriteString("UnusedBytes");
    7494  json.WriteNumber(stat.unusedBytes);
    7495 
    7496  if(stat.allocationCount > 1)
    7497  {
    7498  json.WriteString("AllocationSize");
    7499  json.BeginObject(true);
    7500  json.WriteString("Min");
    7501  json.WriteNumber(stat.allocationSizeMin);
    7502  json.WriteString("Avg");
    7503  json.WriteNumber(stat.allocationSizeAvg);
    7504  json.WriteString("Max");
    7505  json.WriteNumber(stat.allocationSizeMax);
    7506  json.EndObject();
    7507  }
    7508 
    7509  if(stat.unusedRangeCount > 1)
    7510  {
    7511  json.WriteString("UnusedRangeSize");
    7512  json.BeginObject(true);
    7513  json.WriteString("Min");
    7514  json.WriteNumber(stat.unusedRangeSizeMin);
    7515  json.WriteString("Avg");
    7516  json.WriteNumber(stat.unusedRangeSizeAvg);
    7517  json.WriteString("Max");
    7518  json.WriteNumber(stat.unusedRangeSizeMax);
    7519  json.EndObject();
    7520  }
    7521 
    7522  json.EndObject();
    7523 }
    7524 
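// For reference, a minimal sketch of the JSON shape VmaPrintStatInfo emits for
// a hypothetical block with two allocations (field values are made up):
//
//   { "Blocks": 1, "Allocations": 2, "UnusedRanges": 1,
//     "UsedBytes": 768, "UnusedBytes": 256,
//     "AllocationSize": { "Min": 256, "Avg": 384, "Max": 512 } }
//
// "AllocationSize" and "UnusedRangeSize" are written only when more than one
// allocation or unused range exists, as guarded above.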
    7525 #endif // #if VMA_STATS_STRING_ENABLED
    7526 
    7527 struct VmaSuballocationItemSizeLess
    7528 {
    7529  bool operator()(
    7530  const VmaSuballocationList::iterator lhs,
    7531  const VmaSuballocationList::iterator rhs) const
    7532  {
    7533  return lhs->size < rhs->size;
    7534  }
    7535  bool operator()(
    7536  const VmaSuballocationList::iterator lhs,
    7537  VkDeviceSize rhsSize) const
    7538  {
    7539  return lhs->size < rhsSize;
    7540  }
    7541 };
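// The two operator() overloads above make the comparator usable both for
// ordering iterators against each other and for searching with a raw
// VkDeviceSize key. A minimal sketch of the latter, mirroring how
// CreateAllocationRequest uses it further below:
//
//   // Find the first registered free suballocation whose size is >= minSize.
//   VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
//       m_FreeSuballocationsBySize.data(),
//       m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
//       minSize, // hypothetical VkDeviceSize key
//       VmaSuballocationItemSizeLess());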
    7542 
    7543 
    7544 ////////////////////////////////////////////////////////////////////////////////
    7545 // class VmaBlockMetadata
    7546 
    7547 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
    7548  m_Size(0),
    7549  m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
    7550 {
    7551 }
    7552 
    7553 #if VMA_STATS_STRING_ENABLED
    7554 
    7555 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
    7556  VkDeviceSize unusedBytes,
    7557  size_t allocationCount,
    7558  size_t unusedRangeCount) const
    7559 {
    7560  json.BeginObject();
    7561 
    7562  json.WriteString("TotalBytes");
    7563  json.WriteNumber(GetSize());
    7564 
    7565  json.WriteString("UnusedBytes");
    7566  json.WriteNumber(unusedBytes);
    7567 
    7568  json.WriteString("Allocations");
    7569  json.WriteNumber((uint64_t)allocationCount);
    7570 
    7571  json.WriteString("UnusedRanges");
    7572  json.WriteNumber((uint64_t)unusedRangeCount);
    7573 
    7574  json.WriteString("Suballocations");
    7575  json.BeginArray();
    7576 }
    7577 
    7578 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
    7579  VkDeviceSize offset,
    7580  VmaAllocation hAllocation) const
    7581 {
    7582  json.BeginObject(true);
    7583 
    7584  json.WriteString("Offset");
    7585  json.WriteNumber(offset);
    7586 
    7587  hAllocation->PrintParameters(json);
    7588 
    7589  json.EndObject();
    7590 }
    7591 
    7592 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
    7593  VkDeviceSize offset,
    7594  VkDeviceSize size) const
    7595 {
    7596  json.BeginObject(true);
    7597 
    7598  json.WriteString("Offset");
    7599  json.WriteNumber(offset);
    7600 
    7601  json.WriteString("Type");
    7602  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
    7603 
    7604  json.WriteString("Size");
    7605  json.WriteNumber(size);
    7606 
    7607  json.EndObject();
    7608 }
    7609 
    7610 void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
    7611 {
    7612  json.EndArray();
    7613  json.EndObject();
    7614 }
    7615 
    7616 #endif // #if VMA_STATS_STRING_ENABLED
    7617 
    7618 ////////////////////////////////////////////////////////////////////////////////
    7619 // class VmaBlockMetadata_Generic
    7620 
    7621 VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
    7622  VmaBlockMetadata(hAllocator),
    7623  m_FreeCount(0),
    7624  m_SumFreeSize(0),
    7625  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    7626  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
    7627 {
    7628 }
    7629 
    7630 VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
    7631 {
    7632 }
    7633 
    7634 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
    7635 {
    7636  VmaBlockMetadata::Init(size);
    7637 
    7638  m_FreeCount = 1;
    7639  m_SumFreeSize = size;
    7640 
    7641  VmaSuballocation suballoc = {};
    7642  suballoc.offset = 0;
    7643  suballoc.size = size;
    7644  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    7645  suballoc.hAllocation = VK_NULL_HANDLE;
    7646 
    7647  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    7648  m_Suballocations.push_back(suballoc);
    7649  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
    7650  --suballocItem;
    7651  m_FreeSuballocationsBySize.push_back(suballocItem);
    7652 }
    7653 
    7654 bool VmaBlockMetadata_Generic::Validate() const
    7655 {
    7656  VMA_VALIDATE(!m_Suballocations.empty());
    7657 
    7658  // Expected offset of new suballocation as calculated from previous ones.
    7659  VkDeviceSize calculatedOffset = 0;
    7660  // Expected number of free suballocations as calculated from traversing their list.
    7661  uint32_t calculatedFreeCount = 0;
    7662  // Expected sum size of free suballocations as calculated from traversing their list.
    7663  VkDeviceSize calculatedSumFreeSize = 0;
    7664  // Expected number of free suballocations that should be registered in
    7665  // m_FreeSuballocationsBySize calculated from traversing their list.
    7666  size_t freeSuballocationsToRegister = 0;
    7667  // True if previous visited suballocation was free.
    7668  bool prevFree = false;
    7669 
    7670  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    7671  suballocItem != m_Suballocations.cend();
    7672  ++suballocItem)
    7673  {
    7674  const VmaSuballocation& subAlloc = *suballocItem;
    7675 
    7676  // Actual offset of this suballocation doesn't match expected one.
    7677  VMA_VALIDATE(subAlloc.offset == calculatedOffset);
    7678 
    7679  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
    7680  // Two adjacent free suballocations are invalid. They should be merged.
    7681  VMA_VALIDATE(!prevFree || !currFree);
    7682 
    7683  VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
    7684 
    7685  if(currFree)
    7686  {
    7687  calculatedSumFreeSize += subAlloc.size;
    7688  ++calculatedFreeCount;
    7689  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    7690  {
    7691  ++freeSuballocationsToRegister;
    7692  }
    7693 
    7694  // Margin required between allocations - every free space must be at least that large.
    7695  VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
    7696  }
    7697  else
    7698  {
    7699  VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
    7700  VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
    7701 
    7702  // Margin required between allocations - previous allocation must be free.
    7703  VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
    7704  }
    7705 
    7706  calculatedOffset += subAlloc.size;
    7707  prevFree = currFree;
    7708  }
    7709 
    7710  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    7711  // match expected one.
    7712  VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
    7713 
    7714  VkDeviceSize lastSize = 0;
    7715  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    7716  {
    7717  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
    7718 
    7719  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
    7720  VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
    7721  // They must be sorted by size ascending.
    7722  VMA_VALIDATE(suballocItem->size >= lastSize);
    7723 
    7724  lastSize = suballocItem->size;
    7725  }
    7726 
    7727  // Check if totals match calculated values.
    7728  VMA_VALIDATE(ValidateFreeSuballocationList());
    7729  VMA_VALIDATE(calculatedOffset == GetSize());
    7730  VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
    7731  VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
    7732 
    7733  return true;
    7734 }
    7735 
    7736 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
    7737 {
    7738  if(!m_FreeSuballocationsBySize.empty())
    7739  {
    7740  return m_FreeSuballocationsBySize.back()->size;
    7741  }
    7742  else
    7743  {
    7744  return 0;
    7745  }
    7746 }
    7747 
    7748 bool VmaBlockMetadata_Generic::IsEmpty() const
    7749 {
    7750  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
    7751 }
    7752 
    7753 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    7754 {
    7755  outInfo.blockCount = 1;
    7756 
    7757  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    7758  outInfo.allocationCount = rangeCount - m_FreeCount;
    7759  outInfo.unusedRangeCount = m_FreeCount;
    7760 
    7761  outInfo.unusedBytes = m_SumFreeSize;
    7762  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
    7763 
    7764  outInfo.allocationSizeMin = UINT64_MAX;
    7765  outInfo.allocationSizeMax = 0;
    7766  outInfo.unusedRangeSizeMin = UINT64_MAX;
    7767  outInfo.unusedRangeSizeMax = 0;
    7768 
    7769  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    7770  suballocItem != m_Suballocations.cend();
    7771  ++suballocItem)
    7772  {
    7773  const VmaSuballocation& suballoc = *suballocItem;
    7774  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    7775  {
    7776  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    7777  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
    7778  }
    7779  else
    7780  {
    7781  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
    7782  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
    7783  }
    7784  }
    7785 }
    7786 
    7787 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
    7788 {
    7789  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    7790 
    7791  inoutStats.size += GetSize();
    7792  inoutStats.unusedSize += m_SumFreeSize;
    7793  inoutStats.allocationCount += rangeCount - m_FreeCount;
    7794  inoutStats.unusedRangeCount += m_FreeCount;
    7795  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    7796 }
    7797 
    7798 #if VMA_STATS_STRING_ENABLED
    7799 
    7800 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
    7801 {
    7802  PrintDetailedMap_Begin(json,
    7803  m_SumFreeSize, // unusedBytes
    7804  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
    7805  m_FreeCount); // unusedRangeCount
    7806 
    7807  size_t i = 0;
    7808  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    7809  suballocItem != m_Suballocations.cend();
    7810  ++suballocItem, ++i)
    7811  {
    7812  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    7813  {
    7814  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
    7815  }
    7816  else
    7817  {
    7818  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
    7819  }
    7820  }
    7821 
    7822  PrintDetailedMap_End(json);
    7823 }
    7824 
    7825 #endif // #if VMA_STATS_STRING_ENABLED
    7826 
    7827 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
    7828  uint32_t currentFrameIndex,
    7829  uint32_t frameInUseCount,
    7830  VkDeviceSize bufferImageGranularity,
    7831  VkDeviceSize allocSize,
    7832  VkDeviceSize allocAlignment,
    7833  bool upperAddress,
    7834  VmaSuballocationType allocType,
    7835  bool canMakeOtherLost,
    7836  uint32_t strategy,
    7837  VmaAllocationRequest* pAllocationRequest)
    7838 {
    7839  VMA_ASSERT(allocSize > 0);
    7840  VMA_ASSERT(!upperAddress);
    7841  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    7842  VMA_ASSERT(pAllocationRequest != VMA_NULL);
    7843  VMA_HEAVY_ASSERT(Validate());
    7844 
    7845  // There is not enough total free space in this block to fulfill the request: Early return.
    7846  if(canMakeOtherLost == false &&
    7847  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
    7848  {
    7849  return false;
    7850  }
    7851 
    7852  // New algorithm, efficiently searching freeSuballocationsBySize.
    7853  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
    7854  if(freeSuballocCount > 0)
    7855  {
    7856  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
    7857  {
    7858  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
    7859  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    7860  m_FreeSuballocationsBySize.data(),
    7861  m_FreeSuballocationsBySize.data() + freeSuballocCount,
    7862  allocSize + 2 * VMA_DEBUG_MARGIN,
    7863  VmaSuballocationItemSizeLess());
    7864  size_t index = it - m_FreeSuballocationsBySize.data();
    7865  for(; index < freeSuballocCount; ++index)
    7866  {
    7867  if(CheckAllocation(
    7868  currentFrameIndex,
    7869  frameInUseCount,
    7870  bufferImageGranularity,
    7871  allocSize,
    7872  allocAlignment,
    7873  allocType,
    7874  m_FreeSuballocationsBySize[index],
    7875  false, // canMakeOtherLost
    7876  &pAllocationRequest->offset,
    7877  &pAllocationRequest->itemsToMakeLostCount,
    7878  &pAllocationRequest->sumFreeSize,
    7879  &pAllocationRequest->sumItemSize))
    7880  {
    7881  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    7882  return true;
    7883  }
    7884  }
    7885  }
    7886  else if(strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
    7887  {
    7888  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    7889  it != m_Suballocations.end();
    7890  ++it)
    7891  {
    7892  if(it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
    7893  currentFrameIndex,
    7894  frameInUseCount,
    7895  bufferImageGranularity,
    7896  allocSize,
    7897  allocAlignment,
    7898  allocType,
    7899  it,
    7900  false, // canMakeOtherLost
    7901  &pAllocationRequest->offset,
    7902  &pAllocationRequest->itemsToMakeLostCount,
    7903  &pAllocationRequest->sumFreeSize,
    7904  &pAllocationRequest->sumItemSize))
    7905  {
    7906  pAllocationRequest->item = it;
    7907  return true;
    7908  }
    7909  }
    7910  }
    7911  else // WORST_FIT, FIRST_FIT
    7912  {
    7913  // Search starting from biggest suballocations.
    7914  for(size_t index = freeSuballocCount; index--; )
    7915  {
    7916  if(CheckAllocation(
    7917  currentFrameIndex,
    7918  frameInUseCount,
    7919  bufferImageGranularity,
    7920  allocSize,
    7921  allocAlignment,
    7922  allocType,
    7923  m_FreeSuballocationsBySize[index],
    7924  false, // canMakeOtherLost
    7925  &pAllocationRequest->offset,
    7926  &pAllocationRequest->itemsToMakeLostCount,
    7927  &pAllocationRequest->sumFreeSize,
    7928  &pAllocationRequest->sumItemSize))
    7929  {
    7930  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    7931  return true;
    7932  }
    7933  }
    7934  }
    7935  }
    7936 
    7937  if(canMakeOtherLost)
    7938  {
    7939  // Brute-force algorithm. TODO: Come up with something better.
    7940 
    7941  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
    7942  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
    7943 
    7944  VmaAllocationRequest tmpAllocRequest = {};
    7945  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
    7946  suballocIt != m_Suballocations.end();
    7947  ++suballocIt)
    7948  {
    7949  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
    7950  suballocIt->hAllocation->CanBecomeLost())
    7951  {
    7952  if(CheckAllocation(
    7953  currentFrameIndex,
    7954  frameInUseCount,
    7955  bufferImageGranularity,
    7956  allocSize,
    7957  allocAlignment,
    7958  allocType,
    7959  suballocIt,
    7960  canMakeOtherLost,
    7961  &tmpAllocRequest.offset,
    7962  &tmpAllocRequest.itemsToMakeLostCount,
    7963  &tmpAllocRequest.sumFreeSize,
    7964  &tmpAllocRequest.sumItemSize))
    7965  {
    7966  tmpAllocRequest.item = suballocIt;
    7967 
    7968  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost() ||
    7969  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
    7970  {
    7971  *pAllocationRequest = tmpAllocRequest;
    7972  }
    7973  }
    7974  }
    7975  }
    7976 
    7977  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
    7978  {
    7979  return true;
    7980  }
    7981  }
    7982 
    7983  return false;
    7984 }
    7985 
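// A hypothetical caller sketch (illustrative, not library code) showing how
// the request produced above is consumed: CreateAllocationRequest only plans
// the placement; a subsequent Alloc() commits it to the metadata.
//
//   VmaAllocationRequest request = {};
//   if(metadata->CreateAllocationRequest(
//       frameIndex, frameInUseCount, granularity,
//       size, alignment,
//       false, // upperAddress
//       suballocType,
//       false, // canMakeOtherLost
//       VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT,
//       &request))
//   {
//       metadata->Alloc(request, suballocType, size, false, hAllocation);
//   }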
    7986 bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
    7987  uint32_t currentFrameIndex,
    7988  uint32_t frameInUseCount,
    7989  VmaAllocationRequest* pAllocationRequest)
    7990 {
    7991  while(pAllocationRequest->itemsToMakeLostCount > 0)
    7992  {
    7993  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
    7994  {
    7995  ++pAllocationRequest->item;
    7996  }
    7997  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    7998  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
    7999  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
    8000  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    8001  {
    8002  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
    8003  --pAllocationRequest->itemsToMakeLostCount;
    8004  }
    8005  else
    8006  {
    8007  return false;
    8008  }
    8009  }
    8010 
    8011  VMA_HEAVY_ASSERT(Validate());
    8012  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    8013  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
    8014 
    8015  return true;
    8016 }
    8017 
    8018 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    8019 {
    8020  uint32_t lostAllocationCount = 0;
    8021  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    8022  it != m_Suballocations.end();
    8023  ++it)
    8024  {
    8025  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
    8026  it->hAllocation->CanBecomeLost() &&
    8027  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    8028  {
    8029  it = FreeSuballocation(it);
    8030  ++lostAllocationCount;
    8031  }
    8032  }
    8033  return lostAllocationCount;
    8034 }
    8035 
    8036 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
    8037 {
    8038  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    8039  it != m_Suballocations.end();
    8040  ++it)
    8041  {
    8042  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
    8043  {
    8044  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
    8045  {
    8046  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    8047  return VK_ERROR_VALIDATION_FAILED_EXT;
    8048  }
    8049  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
    8050  {
    8051  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    8052  return VK_ERROR_VALIDATION_FAILED_EXT;
    8053  }
    8054  }
    8055  }
    8056 
    8057  return VK_SUCCESS;
    8058 }
    8059 
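// Illustrative memory layout assumed by CheckCorruption above (when
// VMA_DEBUG_MARGIN > 0): every used range is surrounded by free margins whose
// leading bytes hold a known magic value, written when the allocation is made.
//
//   | ... free ... [MAGIC] | allocation (offset, size) | [MAGIC] ... free ... |
//                 ^ offset - VMA_DEBUG_MARGIN          ^ offset + size
//
// A failed VmaValidateMagicValue therefore indicates an out-of-bounds write
// just before or just after the validated allocation.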
    8060 void VmaBlockMetadata_Generic::Alloc(
    8061  const VmaAllocationRequest& request,
    8062  VmaSuballocationType type,
    8063  VkDeviceSize allocSize,
    8064  bool upperAddress,
    8065  VmaAllocation hAllocation)
    8066 {
    8067  VMA_ASSERT(!upperAddress);
    8068  VMA_ASSERT(request.item != m_Suballocations.end());
    8069  VmaSuballocation& suballoc = *request.item;
    8070  // Given suballocation is a free block.
    8071  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    8072  // Given offset is inside this suballocation.
    8073  VMA_ASSERT(request.offset >= suballoc.offset);
    8074  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    8075  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    8076  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
    8077 
    8078  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    8079  // it to become used.
    8080  UnregisterFreeSuballocation(request.item);
    8081 
    8082  suballoc.offset = request.offset;
    8083  suballoc.size = allocSize;
    8084  suballoc.type = type;
    8085  suballoc.hAllocation = hAllocation;
    8086 
    8087  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    8088  if(paddingEnd)
    8089  {
    8090  VmaSuballocation paddingSuballoc = {};
    8091  paddingSuballoc.offset = request.offset + allocSize;
    8092  paddingSuballoc.size = paddingEnd;
    8093  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8094  VmaSuballocationList::iterator next = request.item;
    8095  ++next;
    8096  const VmaSuballocationList::iterator paddingEndItem =
    8097  m_Suballocations.insert(next, paddingSuballoc);
    8098  RegisterFreeSuballocation(paddingEndItem);
    8099  }
    8100 
    8101  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    8102  if(paddingBegin)
    8103  {
    8104  VmaSuballocation paddingSuballoc = {};
    8105  paddingSuballoc.offset = request.offset - paddingBegin;
    8106  paddingSuballoc.size = paddingBegin;
    8107  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8108  const VmaSuballocationList::iterator paddingBeginItem =
    8109  m_Suballocations.insert(request.item, paddingSuballoc);
    8110  RegisterFreeSuballocation(paddingBeginItem);
    8111  }
    8112 
    8113  // Update totals.
    8114  m_FreeCount = m_FreeCount - 1;
    8115  if(paddingBegin > 0)
    8116  {
    8117  ++m_FreeCount;
    8118  }
    8119  if(paddingEnd > 0)
    8120  {
    8121  ++m_FreeCount;
    8122  }
    8123  m_SumFreeSize -= allocSize;
    8124 }
    8125 
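// Worked example of the split above (illustrative numbers): take a free
// suballocation at offset 0, size 1024, and a request with offset 256 and
// allocSize 512 (the offset was already aligned by CheckAllocation).
//
//   paddingBegin = 256 - 0          = 256  -> new free range [0, 256)
//   used range                             -> [256, 768), size 512
//   paddingEnd   = 1024 - 256 - 512 = 256  -> new free range [768, 1024)
//
// m_FreeCount goes from 1 to 2 (-1 for the consumed range, +1 per padding) and
// m_SumFreeSize drops by exactly allocSize = 512.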
    8126 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
    8127 {
    8128  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    8129  suballocItem != m_Suballocations.end();
    8130  ++suballocItem)
    8131  {
    8132  VmaSuballocation& suballoc = *suballocItem;
    8133  if(suballoc.hAllocation == allocation)
    8134  {
    8135  FreeSuballocation(suballocItem);
    8136  VMA_HEAVY_ASSERT(Validate());
    8137  return;
    8138  }
    8139  }
    8140  VMA_ASSERT(0 && "Not found!");
    8141 }
    8142 
    8143 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
    8144 {
    8145  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    8146  suballocItem != m_Suballocations.end();
    8147  ++suballocItem)
    8148  {
    8149  VmaSuballocation& suballoc = *suballocItem;
    8150  if(suballoc.offset == offset)
    8151  {
    8152  FreeSuballocation(suballocItem);
    8153  return;
    8154  }
    8155  }
    8156  VMA_ASSERT(0 && "Not found!");
    8157 }
    8158 
    8159 bool VmaBlockMetadata_Generic::ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize)
    8160 {
    8161  typedef VmaSuballocationList::iterator iter_type;
    8162  for(iter_type suballocItem = m_Suballocations.begin();
    8163  suballocItem != m_Suballocations.end();
    8164  ++suballocItem)
    8165  {
    8166  VmaSuballocation& suballoc = *suballocItem;
    8167  if(suballoc.hAllocation == alloc)
    8168  {
    8169  iter_type nextItem = suballocItem;
    8170  ++nextItem;
    8171 
    8172  // Should have been ensured on higher level.
    8173  VMA_ASSERT(newSize != alloc->GetSize() && newSize > 0);
    8174 
    8175  // Shrinking.
    8176  if(newSize < alloc->GetSize())
    8177  {
    8178  const VkDeviceSize sizeDiff = suballoc.size - newSize;
    8179 
    8180  // There is next item.
    8181  if(nextItem != m_Suballocations.end())
    8182  {
    8183  // Next item is free.
    8184  if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    8185  {
    8186  // Grow this next item backward.
    8187  UnregisterFreeSuballocation(nextItem);
    8188  nextItem->offset -= sizeDiff;
    8189  nextItem->size += sizeDiff;
    8190  RegisterFreeSuballocation(nextItem);
    8191  }
    8192  // Next item is not free.
    8193  else
    8194  {
    8195  // Create free item after current one.
    8196  VmaSuballocation newFreeSuballoc;
    8197  newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
    8198  newFreeSuballoc.offset = suballoc.offset + newSize;
    8199  newFreeSuballoc.size = sizeDiff;
    8200  newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8201  iter_type newFreeSuballocIt = m_Suballocations.insert(nextItem, newFreeSuballoc);
    8202  RegisterFreeSuballocation(newFreeSuballocIt);
    8203 
    8204  ++m_FreeCount;
    8205  }
    8206  }
    8207  // This is the last item.
    8208  else
    8209  {
    8210  // Create free item at the end.
    8211  VmaSuballocation newFreeSuballoc;
    8212  newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
    8213  newFreeSuballoc.offset = suballoc.offset + newSize;
    8214  newFreeSuballoc.size = sizeDiff;
    8215  newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8216  m_Suballocations.push_back(newFreeSuballoc);
    8217 
    8218  iter_type newFreeSuballocIt = m_Suballocations.end();
    8219  RegisterFreeSuballocation(--newFreeSuballocIt);
    8220 
    8221  ++m_FreeCount;
    8222  }
    8223 
    8224  suballoc.size = newSize;
    8225  m_SumFreeSize += sizeDiff;
    8226  }
    8227  // Growing.
    8228  else
    8229  {
    8230  const VkDeviceSize sizeDiff = newSize - suballoc.size;
    8231 
    8232  // There is next item.
    8233  if(nextItem != m_Suballocations.end())
    8234  {
    8235  // Next item is free.
    8236  if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    8237  {
    8238  // There is not enough free space, including margin.
    8239  if(nextItem->size < sizeDiff + VMA_DEBUG_MARGIN)
    8240  {
    8241  return false;
    8242  }
    8243 
    8244  // There is more free space than required.
    8245  if(nextItem->size > sizeDiff)
    8246  {
    8247  // Move and shrink this next item.
    8248  UnregisterFreeSuballocation(nextItem);
    8249  nextItem->offset += sizeDiff;
    8250  nextItem->size -= sizeDiff;
    8251  RegisterFreeSuballocation(nextItem);
    8252  }
    8253  // There is exactly the amount of free space required.
    8254  else
    8255  {
    8256  // Remove this next free item.
    8257  UnregisterFreeSuballocation(nextItem);
    8258  m_Suballocations.erase(nextItem);
    8259  --m_FreeCount;
    8260  }
    8261  }
    8262  // Next item is not free - there is no space to grow.
    8263  else
    8264  {
    8265  return false;
    8266  }
    8267  }
    8268  // This is the last item - there is no space to grow.
    8269  else
    8270  {
    8271  return false;
    8272  }
    8273 
    8274  suballoc.size = newSize;
    8275  m_SumFreeSize -= sizeDiff;
    8276  }
    8277 
    8278  // We cannot call Validate() here because alloc object is updated to new size outside of this call.
    8279  return true;
    8280  }
    8281  }
    8282  VMA_ASSERT(0 && "Not found!");
    8283  return false;
    8284 }
    8285 
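// Worked example of the grow path above (illustrative numbers): an allocation
// of size 256 followed by a free range of size 100, growing to newSize = 300
// with VMA_DEBUG_MARGIN == 0.
//
//   sizeDiff = 300 - 256 = 44
//   nextItem->size (100) >= sizeDiff + margin (44) -> growth is possible
//   nextItem->size (100) >  sizeDiff (44)          -> free range shrinks to 56
//                                                     and moves forward by 44
//   suballoc.size = 300, m_SumFreeSize -= 44
//
// Had the free range been exactly 44 bytes, it would have been erased instead.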
    8286 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
    8287 {
    8288  VkDeviceSize lastSize = 0;
    8289  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
    8290  {
    8291  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
    8292 
    8293  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
    8294  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    8295  VMA_VALIDATE(it->size >= lastSize);
    8296  lastSize = it->size;
    8297  }
    8298  return true;
    8299 }
    8300 
    8301 bool VmaBlockMetadata_Generic::CheckAllocation(
    8302  uint32_t currentFrameIndex,
    8303  uint32_t frameInUseCount,
    8304  VkDeviceSize bufferImageGranularity,
    8305  VkDeviceSize allocSize,
    8306  VkDeviceSize allocAlignment,
    8307  VmaSuballocationType allocType,
    8308  VmaSuballocationList::const_iterator suballocItem,
    8309  bool canMakeOtherLost,
    8310  VkDeviceSize* pOffset,
    8311  size_t* itemsToMakeLostCount,
    8312  VkDeviceSize* pSumFreeSize,
    8313  VkDeviceSize* pSumItemSize) const
    8314 {
    8315  VMA_ASSERT(allocSize > 0);
    8316  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    8317  VMA_ASSERT(suballocItem != m_Suballocations.cend());
    8318  VMA_ASSERT(pOffset != VMA_NULL);
    8319 
    8320  *itemsToMakeLostCount = 0;
    8321  *pSumFreeSize = 0;
    8322  *pSumItemSize = 0;
    8323 
    8324  if(canMakeOtherLost)
    8325  {
    8326  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    8327  {
    8328  *pSumFreeSize = suballocItem->size;
    8329  }
    8330  else
    8331  {
    8332  if(suballocItem->hAllocation->CanBecomeLost() &&
    8333  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    8334  {
    8335  ++*itemsToMakeLostCount;
    8336  *pSumItemSize = suballocItem->size;
    8337  }
    8338  else
    8339  {
    8340  return false;
    8341  }
    8342  }
    8343 
    8344  // Remaining size is too small for this request: Early return.
    8345  if(GetSize() - suballocItem->offset < allocSize)
    8346  {
    8347  return false;
    8348  }
    8349 
    8350  // Start from offset equal to beginning of this suballocation.
    8351  *pOffset = suballocItem->offset;
    8352 
    8353  // Apply VMA_DEBUG_MARGIN at the beginning.
    8354  if(VMA_DEBUG_MARGIN > 0)
    8355  {
    8356  *pOffset += VMA_DEBUG_MARGIN;
    8357  }
    8358 
    8359  // Apply alignment.
    8360  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
    8361 
    8362  // Check previous suballocations for BufferImageGranularity conflicts.
    8363  // Make bigger alignment if necessary.
    8364  if(bufferImageGranularity > 1)
    8365  {
    8366  bool bufferImageGranularityConflict = false;
    8367  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
    8368  while(prevSuballocItem != m_Suballocations.cbegin())
    8369  {
    8370  --prevSuballocItem;
    8371  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
    8372  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
    8373  {
    8374  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    8375  {
    8376  bufferImageGranularityConflict = true;
    8377  break;
    8378  }
    8379  }
    8380  else
    8381  // Already on previous page.
    8382  break;
    8383  }
    8384  if(bufferImageGranularityConflict)
    8385  {
    8386  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
    8387  }
    8388  }
    8389 
    8390  // Now that we have final *pOffset, check if we are past suballocItem.
    8391  // If yes, return false - this function should be called for another suballocItem as starting point.
    8392  if(*pOffset >= suballocItem->offset + suballocItem->size)
    8393  {
    8394  return false;
    8395  }
    8396 
    8397  // Calculate padding at the beginning based on current offset.
    8398  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
    8399 
    8400  // Calculate required margin at the end.
    8401  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
    8402 
    8403  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
    8404  // Another early return check.
    8405  if(suballocItem->offset + totalSize > GetSize())
    8406  {
    8407  return false;
    8408  }
    8409 
    8410  // Advance lastSuballocItem until desired size is reached.
    8411  // Update itemsToMakeLostCount.
    8412  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
    8413  if(totalSize > suballocItem->size)
    8414  {
    8415  VkDeviceSize remainingSize = totalSize - suballocItem->size;
    8416  while(remainingSize > 0)
    8417  {
    8418  ++lastSuballocItem;
    8419  if(lastSuballocItem == m_Suballocations.cend())
    8420  {
    8421  return false;
    8422  }
    8423  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    8424  {
    8425  *pSumFreeSize += lastSuballocItem->size;
    8426  }
    8427  else
    8428  {
    8429  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
    8430  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
    8431  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    8432  {
    8433  ++*itemsToMakeLostCount;
    8434  *pSumItemSize += lastSuballocItem->size;
    8435  }
    8436  else
    8437  {
    8438  return false;
    8439  }
    8440  }
    8441  remainingSize = (lastSuballocItem->size < remainingSize) ?
    8442  remainingSize - lastSuballocItem->size : 0;
    8443  }
    8444  }
    8445 
    8446  // Check next suballocations for BufferImageGranularity conflicts.
    8447  // If conflict exists, we must mark more allocations lost or fail.
    8448  if(bufferImageGranularity > 1)
    8449  {
    8450  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
    8451  ++nextSuballocItem;
    8452  while(nextSuballocItem != m_Suballocations.cend())
    8453  {
    8454  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
    8455  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    8456  {
    8457  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    8458  {
    8459  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
    8460  if(nextSuballoc.hAllocation->CanBecomeLost() &&
    8461  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    8462  {
    8463  ++*itemsToMakeLostCount;
    8464  }
    8465  else
    8466  {
    8467  return false;
    8468  }
    8469  }
    8470  }
    8471  else
    8472  {
    8473  // Already on next page.
    8474  break;
    8475  }
    8476  ++nextSuballocItem;
    8477  }
    8478  }
    8479  }
    8480  else
    8481  {
    8482  const VmaSuballocation& suballoc = *suballocItem;
    8483  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    8484 
    8485  *pSumFreeSize = suballoc.size;
    8486 
    8487  // Size of this suballocation is too small for this request: Early return.
    8488  if(suballoc.size < allocSize)
    8489  {
    8490  return false;
    8491  }
    8492 
    8493  // Start from offset equal to beginning of this suballocation.
    8494  *pOffset = suballoc.offset;
    8495 
    8496  // Apply VMA_DEBUG_MARGIN at the beginning.
    8497  if(VMA_DEBUG_MARGIN > 0)
    8498  {
    8499  *pOffset += VMA_DEBUG_MARGIN;
    8500  }
    8501 
    8502  // Apply alignment.
    8503  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
    8504 
    8505  // Check previous suballocations for BufferImageGranularity conflicts.
    8506  // Make bigger alignment if necessary.
    8507  if(bufferImageGranularity > 1)
    8508  {
    8509  bool bufferImageGranularityConflict = false;
    8510  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
    8511  while(prevSuballocItem != m_Suballocations.cbegin())
    8512  {
    8513  --prevSuballocItem;
    8514  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
    8515  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
    8516  {
    8517  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    8518  {
    8519  bufferImageGranularityConflict = true;
    8520  break;
    8521  }
    8522  }
    8523  else
    8524  // Already on previous page.
    8525  break;
    8526  }
    8527  if(bufferImageGranularityConflict)
    8528  {
    8529  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
    8530  }
    8531  }
    8532 
    8533  // Calculate padding at the beginning based on current offset.
    8534  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
    8535 
    8536  // Calculate required margin at the end.
    8537  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
    8538 
    8539  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
    8540  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
    8541  {
    8542  return false;
    8543  }
    8544 
    8545  // Check next suballocations for BufferImageGranularity conflicts.
    8546  // If conflict exists, allocation cannot be made here.
    8547  if(bufferImageGranularity > 1)
    8548  {
    8549  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
    8550  ++nextSuballocItem;
    8551  while(nextSuballocItem != m_Suballocations.cend())
    8552  {
    8553  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
    8554  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    8555  {
    8556  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    8557  {
    8558  return false;
    8559  }
    8560  }
    8561  else
    8562  {
    8563  // Already on next page.
    8564  break;
    8565  }
    8566  ++nextSuballocItem;
    8567  }
    8568  }
    8569  }
    8570 
    8571  // All tests passed: Success. pOffset is already filled.
    8572  return true;
    8573 }
    8574 
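// Illustrative bufferImageGranularity scenario for the checks above: assume a
// granularity of 4096, a previous suballocation holding an optimally tiled
// image at [0, 3000), and a buffer request at candidate offset 3072. Both
// ranges share the page [0, 4096), and buffer-vs-image is a conflicting type
// pair, so *pOffset is re-aligned up to 4096 before the size checks run. The
// same page test is then repeated forward against the following suballocations.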
    8575 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
    8576 {
    8577  VMA_ASSERT(item != m_Suballocations.end());
    8578  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    8579 
    8580  VmaSuballocationList::iterator nextItem = item;
    8581  ++nextItem;
    8582  VMA_ASSERT(nextItem != m_Suballocations.end());
    8583  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
    8584 
    8585  item->size += nextItem->size;
    8586  --m_FreeCount;
    8587  m_Suballocations.erase(nextItem);
    8588 }
    8589 
    8590 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
    8591 {
    8592  // Change this suballocation to be marked as free.
    8593  VmaSuballocation& suballoc = *suballocItem;
    8594  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8595  suballoc.hAllocation = VK_NULL_HANDLE;
    8596 
    8597  // Update totals.
    8598  ++m_FreeCount;
    8599  m_SumFreeSize += suballoc.size;
    8600 
    8601  // Merge with previous and/or next suballocation if it's also free.
    8602  bool mergeWithNext = false;
    8603  bool mergeWithPrev = false;
    8604 
    8605  VmaSuballocationList::iterator nextItem = suballocItem;
    8606  ++nextItem;
    8607  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    8608  {
    8609  mergeWithNext = true;
    8610  }
    8611 
    8612  VmaSuballocationList::iterator prevItem = suballocItem;
    8613  if(suballocItem != m_Suballocations.begin())
    8614  {
    8615  --prevItem;
    8616  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    8617  {
    8618  mergeWithPrev = true;
    8619  }
    8620  }
    8621 
    8622  if(mergeWithNext)
    8623  {
    8624  UnregisterFreeSuballocation(nextItem);
    8625  MergeFreeWithNext(suballocItem);
    8626  }
    8627 
    8628  if(mergeWithPrev)
    8629  {
    8630  UnregisterFreeSuballocation(prevItem);
    8631  MergeFreeWithNext(prevItem);
    8632  RegisterFreeSuballocation(prevItem);
    8633  return prevItem;
    8634  }
    8635  else
    8636  {
    8637  RegisterFreeSuballocation(suballocItem);
    8638  return suballocItem;
    8639  }
    8640 }
    8641 
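// Worked example of the merge logic above (illustrative): freeing the middle
// allocation in [free 128][used 256][free 64] first converts the used range to
// free, then merges the right neighbor into it (256 + 64 = 320) and finally
// merges that into the left neighbor, leaving a single free range of size
// 128 + 320 = 448 that is re-registered once in m_FreeSuballocationsBySize.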
    8642 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
    8643 {
    8644  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    8645  VMA_ASSERT(item->size > 0);
    8646 
    8647  // You may want to enable this validation at the beginning or at the end of
    8648  // this function, depending on what you want to check.
    8649  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    8650 
    8651  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    8652  {
    8653  if(m_FreeSuballocationsBySize.empty())
    8654  {
    8655  m_FreeSuballocationsBySize.push_back(item);
    8656  }
    8657  else
    8658  {
    8659  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
    8660  }
    8661  }
    8662 
    8663  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    8664 }
    8665 
    8666 
    8667 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
    8668 {
    8669  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    8670  VMA_ASSERT(item->size > 0);
    8671 
    8672  // You may want to enable this validation at the beginning or at the end of
    8673  // this function, depending on what you want to check.
    8674  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    8675 
    8676  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    8677  {
    8678  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    8679  m_FreeSuballocationsBySize.data(),
    8680  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
    8681  item,
    8682  VmaSuballocationItemSizeLess());
    8683  for(size_t index = it - m_FreeSuballocationsBySize.data();
    8684  index < m_FreeSuballocationsBySize.size();
    8685  ++index)
    8686  {
    8687  if(m_FreeSuballocationsBySize[index] == item)
    8688  {
    8689  VmaVectorRemove(m_FreeSuballocationsBySize, index);
    8690  return;
    8691  }
    8692  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
    8693  }
    8694  VMA_ASSERT(0 && "Not found.");
    8695  }
    8696 
    8697  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    8698 }
    8699 
    8700 bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible(
    8701  VkDeviceSize bufferImageGranularity,
    8702  VmaSuballocationType& inOutPrevSuballocType) const
    8703 {
    8704  if(bufferImageGranularity == 1 || IsEmpty())
    8705  {
    8706  return false;
    8707  }
    8708 
    8709  VkDeviceSize minAlignment = VK_WHOLE_SIZE;
    8710  bool typeConflictFound = false;
    8711  for(VmaSuballocationList::const_iterator it = m_Suballocations.cbegin();
    8712  it != m_Suballocations.cend();
    8713  ++it)
    8714  {
    8715  const VmaSuballocationType suballocType = it->type;
    8716  if(suballocType != VMA_SUBALLOCATION_TYPE_FREE)
    8717  {
    8718  minAlignment = VMA_MIN(minAlignment, it->hAllocation->GetAlignment());
    8719  if(VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType))
    8720  {
    8721  typeConflictFound = true;
    8722  }
    8723  inOutPrevSuballocType = suballocType;
    8724  }
    8725  }
    8726 
    8727  return typeConflictFound || minAlignment >= bufferImageGranularity;
    8728 }
    8729 
    8730 ////////////////////////////////////////////////////////////////////////////////
    8731 // class VmaBlockMetadata_Linear
    8732 
    8733 VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
    8734  VmaBlockMetadata(hAllocator),
    8735  m_SumFreeSize(0),
    8736  m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    8737  m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    8738  m_1stVectorIndex(0),
    8739  m_2ndVectorMode(SECOND_VECTOR_EMPTY),
    8740  m_1stNullItemsBeginCount(0),
    8741  m_1stNullItemsMiddleCount(0),
    8742  m_2ndNullItemsCount(0)
    8743 {
    8744 }
    8745 
    8746 VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
    8747 {
    8748 }
    8749 
    8750 void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
    8751 {
    8752  VmaBlockMetadata::Init(size);
    8753  m_SumFreeSize = size;
    8754 }
    8755 
    8756 bool VmaBlockMetadata_Linear::Validate() const
    8757 {
    8758  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8759  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8760 
    8761  VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
    8762  VMA_VALIDATE(!suballocations1st.empty() ||
    8763  suballocations2nd.empty() ||
    8764  m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
    8765 
    8766  if(!suballocations1st.empty())
    8767  {
    8768  // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
    8769  VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
    8770  // Null item at the end should be just pop_back().
    8771  VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
    8772  }
    8773  if(!suballocations2nd.empty())
    8774  {
    8775  // Null item at the end should be just pop_back().
    8776  VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
    8777  }
    8778 
    8779  VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
    8780  VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
    8781 
    8782  VkDeviceSize sumUsedSize = 0;
    8783  const size_t suballoc1stCount = suballocations1st.size();
    8784  VkDeviceSize offset = VMA_DEBUG_MARGIN;
    8785 
    8786  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8787  {
    8788  const size_t suballoc2ndCount = suballocations2nd.size();
    8789  size_t nullItem2ndCount = 0;
    8790  for(size_t i = 0; i < suballoc2ndCount; ++i)
    8791  {
    8792  const VmaSuballocation& suballoc = suballocations2nd[i];
    8793  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    8794 
    8795  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
    8796  VMA_VALIDATE(suballoc.offset >= offset);
    8797 
    8798  if(!currFree)
    8799  {
    8800  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
    8801  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
    8802  sumUsedSize += suballoc.size;
    8803  }
    8804  else
    8805  {
    8806  ++nullItem2ndCount;
    8807  }
    8808 
    8809  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    8810  }
    8811 
    8812  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    8813  }
    8814 
    8815  for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
    8816  {
    8817  const VmaSuballocation& suballoc = suballocations1st[i];
    8818  VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
    8819  suballoc.hAllocation == VK_NULL_HANDLE);
    8820  }
    8821 
    8822  size_t nullItem1stCount = m_1stNullItemsBeginCount;
    8823 
    8824  for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
    8825  {
    8826  const VmaSuballocation& suballoc = suballocations1st[i];
    8827  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    8828 
    8829  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
    8830  VMA_VALIDATE(suballoc.offset >= offset);
    8831  VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
    8832 
    8833  if(!currFree)
    8834  {
    8835  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
    8836  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
    8837  sumUsedSize += suballoc.size;
    8838  }
    8839  else
    8840  {
    8841  ++nullItem1stCount;
    8842  }
    8843 
    8844  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    8845  }
    8846  VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
    8847 
    8848  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8849  {
    8850  const size_t suballoc2ndCount = suballocations2nd.size();
    8851  size_t nullItem2ndCount = 0;
    8852  for(size_t i = suballoc2ndCount; i--; )
    8853  {
    8854  const VmaSuballocation& suballoc = suballocations2nd[i];
    8855  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    8856 
    8857  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
    8858  VMA_VALIDATE(suballoc.offset >= offset);
    8859 
    8860  if(!currFree)
    8861  {
    8862  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
    8863  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
    8864  sumUsedSize += suballoc.size;
    8865  }
    8866  else
    8867  {
    8868  ++nullItem2ndCount;
    8869  }
    8870 
    8871  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    8872  }
    8873 
    8874  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    8875  }
    8876 
    8877  VMA_VALIDATE(offset <= GetSize());
    8878  VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
    8879 
    8880  return true;
    8881 }
    8882 
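// Illustrative layout of the linear metadata validated above: suballocations
// live in two vectors whose roles depend on m_2ndVectorMode.
//
//   SECOND_VECTOR_EMPTY:        [ ....... 1st ....... ] free tail
//   SECOND_VECTOR_RING_BUFFER:  [ 2nd ] free [ 1st ]        (2nd restarts at offset 0)
//   SECOND_VECTOR_DOUBLE_STACK: [ 1st ->   free   <- 2nd ]  (2nd grows downward)
//
// Freed items become null entries counted in m_1stNullItemsBeginCount /
// m_1stNullItemsMiddleCount / m_2ndNullItemsCount rather than being erased,
// which is what the null-item bookkeeping checks enforce.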
    8883 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
    8884 {
    8885  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
    8886  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
    8887 }
    8888 
    8889 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
    8890 {
    8891  const VkDeviceSize size = GetSize();
    8892 
    8893  /*
    8894  We don't consider gaps inside allocation vectors with freed allocations because
    8895  they are not suitable for reuse in linear allocator. We consider only space that
    8896  is available for new allocations.
    8897  */
    8898  if(IsEmpty())
    8899  {
    8900  return size;
    8901  }
    8902 
    8903  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8904 
    8905  switch(m_2ndVectorMode)
    8906  {
    8907  case SECOND_VECTOR_EMPTY:
    8908  /*
    8909  Available space is after end of 1st, as well as before beginning of 1st (which
    8910  would make it a ring buffer).
    8911  */
    8912  {
    8913  const size_t suballocations1stCount = suballocations1st.size();
    8914  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
    8915  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
    8916  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
    8917  return VMA_MAX(
    8918  firstSuballoc.offset,
    8919  size - (lastSuballoc.offset + lastSuballoc.size));
    8920  }
    8921  break;
    8922 
    8923  case SECOND_VECTOR_RING_BUFFER:
    8924  /*
    8925  Available space is only between end of 2nd and beginning of 1st.
    8926  */
    8927  {
    8928  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8929  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
    8930  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
    8931  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
    8932  }
    8933  break;
    8934 
    8935  case SECOND_VECTOR_DOUBLE_STACK:
    8936  /*
    8937  Available space is only between end of 1st and top of 2nd.
    8938  */
    8939  {
    8940  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8941  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
    8942  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
    8943  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
    8944  }
    8945  break;
    8946 
    8947  default:
    8948  VMA_ASSERT(0);
    8949  return 0;
    8950  }
    8951 }
    8952 
    8953 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    8954 {
    8955  const VkDeviceSize size = GetSize();
    8956  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8957  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8958  const size_t suballoc1stCount = suballocations1st.size();
    8959  const size_t suballoc2ndCount = suballocations2nd.size();
    8960 
    8961  outInfo.blockCount = 1;
    8962  outInfo.allocationCount = (uint32_t)GetAllocationCount();
    8963  outInfo.unusedRangeCount = 0;
    8964  outInfo.usedBytes = 0;
    8965  outInfo.allocationSizeMin = UINT64_MAX;
    8966  outInfo.allocationSizeMax = 0;
    8967  outInfo.unusedRangeSizeMin = UINT64_MAX;
    8968  outInfo.unusedRangeSizeMax = 0;
    8969 
    8970  VkDeviceSize lastOffset = 0;
    8971 
    8972  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8973  {
    8974  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8975  size_t nextAlloc2ndIndex = 0;
    8976  while(lastOffset < freeSpace2ndTo1stEnd)
    8977  {
    8978  // Find next non-null allocation or move nextAllocIndex to the end.
    8979  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8980  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8981  {
    8982  ++nextAlloc2ndIndex;
    8983  }
    8984 
    8985  // Found non-null allocation.
    8986  if(nextAlloc2ndIndex < suballoc2ndCount)
    8987  {
    8988  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8989 
    8990  // 1. Process free space before this allocation.
    8991  if(lastOffset < suballoc.offset)
    8992  {
    8993  // There is free space from lastOffset to suballoc.offset.
    8994  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8995  ++outInfo.unusedRangeCount;
    8996  outInfo.unusedBytes += unusedRangeSize;
    8997  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8998  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8999  }
    9000 
    9001  // 2. Process this allocation.
    9002  // There is allocation with suballoc.offset, suballoc.size.
    9003  outInfo.usedBytes += suballoc.size;
    9004  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    9005  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
    9006 
    9007  // 3. Prepare for next iteration.
    9008  lastOffset = suballoc.offset + suballoc.size;
    9009  ++nextAlloc2ndIndex;
    9010  }
    9011  // We are at the end.
    9012  else
    9013  {
    9014  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    9015  if(lastOffset < freeSpace2ndTo1stEnd)
    9016  {
    9017  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    9018  ++outInfo.unusedRangeCount;
    9019  outInfo.unusedBytes += unusedRangeSize;
    9020  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9021  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9022  }
    9023 
    9024  // End of loop.
    9025  lastOffset = freeSpace2ndTo1stEnd;
    9026  }
    9027  }
    9028  }
    9029 
    9030  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    9031  const VkDeviceSize freeSpace1stTo2ndEnd =
    9032  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    9033  while(lastOffset < freeSpace1stTo2ndEnd)
    9034  {
    9035  // Find next non-null allocation or move nextAlloc1stIndex to the end.
    9036  while(nextAlloc1stIndex < suballoc1stCount &&
    9037  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    9038  {
    9039  ++nextAlloc1stIndex;
    9040  }
    9041 
    9042  // Found non-null allocation.
    9043  if(nextAlloc1stIndex < suballoc1stCount)
    9044  {
    9045  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    9046 
    9047  // 1. Process free space before this allocation.
    9048  if(lastOffset < suballoc.offset)
    9049  {
    9050  // There is free space from lastOffset to suballoc.offset.
    9051  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9052  ++outInfo.unusedRangeCount;
    9053  outInfo.unusedBytes += unusedRangeSize;
    9054  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9055  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9056  }
    9057 
    9058  // 2. Process this allocation.
    9059  // There is allocation with suballoc.offset, suballoc.size.
    9060  outInfo.usedBytes += suballoc.size;
    9061  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    9062  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
    9063 
    9064  // 3. Prepare for next iteration.
    9065  lastOffset = suballoc.offset + suballoc.size;
    9066  ++nextAlloc1stIndex;
    9067  }
    9068  // We are at the end.
    9069  else
    9070  {
    9071  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    9072  if(lastOffset < freeSpace1stTo2ndEnd)
    9073  {
    9074  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    9075  ++outInfo.unusedRangeCount;
    9076  outInfo.unusedBytes += unusedRangeSize;
    9077  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9078  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9079  }
    9080 
    9081  // End of loop.
    9082  lastOffset = freeSpace1stTo2ndEnd;
    9083  }
    9084  }
    9085 
    9086  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    9087  {
    9088  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    9089  while(lastOffset < size)
    9090  {
    9091  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9092  while(nextAlloc2ndIndex != SIZE_MAX &&
    9093  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9094  {
    9095  --nextAlloc2ndIndex;
    9096  }
    9097 
    9098  // Found non-null allocation.
    9099  if(nextAlloc2ndIndex != SIZE_MAX)
    9100  {
    9101  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9102 
    9103  // 1. Process free space before this allocation.
    9104  if(lastOffset < suballoc.offset)
    9105  {
    9106  // There is free space from lastOffset to suballoc.offset.
    9107  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9108  ++outInfo.unusedRangeCount;
    9109  outInfo.unusedBytes += unusedRangeSize;
    9110  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9111  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9112  }
    9113 
    9114  // 2. Process this allocation.
    9115  // There is allocation with suballoc.offset, suballoc.size.
    9116  outInfo.usedBytes += suballoc.size;
    9117  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    9118  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
    9119 
    9120  // 3. Prepare for next iteration.
    9121  lastOffset = suballoc.offset + suballoc.size;
    9122  --nextAlloc2ndIndex;
    9123  }
    9124  // We are at the end.
    9125  else
    9126  {
    9127  // There is free space from lastOffset to size.
    9128  if(lastOffset < size)
    9129  {
    9130  const VkDeviceSize unusedRangeSize = size - lastOffset;
    9131  ++outInfo.unusedRangeCount;
    9132  outInfo.unusedBytes += unusedRangeSize;
    9133  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9134  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9135  }
    9136 
    9137  // End of loop.
    9138  lastOffset = size;
    9139  }
    9140  }
    9141  }
    9142 
    9143  outInfo.unusedBytes = size - outInfo.usedBytes;
    9144 }
    9145 
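// NOTE (illustrative sketch, not part of vk_mem_alloc.h): the loop structure
// above is shared by all VmaBlockMetadata_Linear queries. It walks one of two
// layouts, with offsets growing to the right:
//
//   SECOND_VECTOR_RING_BUFFER:
//     | 2nd vector (wrapped part) | free | 1st vector, after null prefix |
//     0                                  ^ suballocations1st[m_1stNullItemsBeginCount].offset
//
//   SECOND_VECTOR_DOUBLE_STACK:
//     | 1st vector (grows up) | free | 2nd vector (grows down) |
//     0                              ^ suballocations2nd.back().offset      GetSize()
//
// Hence the three passes: the 2nd vector as ring-buffer tail, then the 1st
// vector, then the 2nd vector as upper stack, with unused ranges accumulated
// between visited suballocations.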
    9146 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
    9147 {
    9148  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9149  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9150  const VkDeviceSize size = GetSize();
    9151  const size_t suballoc1stCount = suballocations1st.size();
    9152  const size_t suballoc2ndCount = suballocations2nd.size();
    9153 
    9154  inoutStats.size += size;
    9155 
    9156  VkDeviceSize lastOffset = 0;
    9157 
    9158  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    9159  {
    9160  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    9161  size_t nextAlloc2ndIndex = 0;
    9162  while(lastOffset < freeSpace2ndTo1stEnd)
    9163  {
    9164  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9165  while(nextAlloc2ndIndex < suballoc2ndCount &&
    9166  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9167  {
    9168  ++nextAlloc2ndIndex;
    9169  }
    9170 
    9171  // Found non-null allocation.
    9172  if(nextAlloc2ndIndex < suballoc2ndCount)
    9173  {
    9174  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9175 
    9176  // 1. Process free space before this allocation.
    9177  if(lastOffset < suballoc.offset)
    9178  {
    9179  // There is free space from lastOffset to suballoc.offset.
    9180  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9181  inoutStats.unusedSize += unusedRangeSize;
    9182  ++inoutStats.unusedRangeCount;
    9183  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9184  }
    9185 
    9186  // 2. Process this allocation.
    9187  // There is allocation with suballoc.offset, suballoc.size.
    9188  ++inoutStats.allocationCount;
    9189 
    9190  // 3. Prepare for next iteration.
    9191  lastOffset = suballoc.offset + suballoc.size;
    9192  ++nextAlloc2ndIndex;
    9193  }
    9194  // We are at the end.
    9195  else
    9196  {
    9197  if(lastOffset < freeSpace2ndTo1stEnd)
    9198  {
    9199  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    9200  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    9201  inoutStats.unusedSize += unusedRangeSize;
    9202  ++inoutStats.unusedRangeCount;
    9203  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9204  }
    9205 
    9206  // End of loop.
    9207  lastOffset = freeSpace2ndTo1stEnd;
    9208  }
    9209  }
    9210  }
    9211 
    9212  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    9213  const VkDeviceSize freeSpace1stTo2ndEnd =
    9214  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    9215  while(lastOffset < freeSpace1stTo2ndEnd)
    9216  {
    9217  // Find next non-null allocation or move nextAlloc1stIndex to the end.
    9218  while(nextAlloc1stIndex < suballoc1stCount &&
    9219  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    9220  {
    9221  ++nextAlloc1stIndex;
    9222  }
    9223 
    9224  // Found non-null allocation.
    9225  if(nextAlloc1stIndex < suballoc1stCount)
    9226  {
    9227  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    9228 
    9229  // 1. Process free space before this allocation.
    9230  if(lastOffset < suballoc.offset)
    9231  {
    9232  // There is free space from lastOffset to suballoc.offset.
    9233  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9234  inoutStats.unusedSize += unusedRangeSize;
    9235  ++inoutStats.unusedRangeCount;
    9236  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9237  }
    9238 
    9239  // 2. Process this allocation.
    9240  // There is allocation with suballoc.offset, suballoc.size.
    9241  ++inoutStats.allocationCount;
    9242 
    9243  // 3. Prepare for next iteration.
    9244  lastOffset = suballoc.offset + suballoc.size;
    9245  ++nextAlloc1stIndex;
    9246  }
    9247  // We are at the end.
    9248  else
    9249  {
    9250  if(lastOffset < freeSpace1stTo2ndEnd)
    9251  {
    9252  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    9253  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    9254  inoutStats.unusedSize += unusedRangeSize;
    9255  ++inoutStats.unusedRangeCount;
    9256  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9257  }
    9258 
    9259  // End of loop.
    9260  lastOffset = freeSpace1stTo2ndEnd;
    9261  }
    9262  }
    9263 
    9264  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    9265  {
    9266  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    9267  while(lastOffset < size)
    9268  {
    9269  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9270  while(nextAlloc2ndIndex != SIZE_MAX &&
    9271  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9272  {
    9273  --nextAlloc2ndIndex;
    9274  }
    9275 
    9276  // Found non-null allocation.
    9277  if(nextAlloc2ndIndex != SIZE_MAX)
    9278  {
    9279  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9280 
    9281  // 1. Process free space before this allocation.
    9282  if(lastOffset < suballoc.offset)
    9283  {
    9284  // There is free space from lastOffset to suballoc.offset.
    9285  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9286  inoutStats.unusedSize += unusedRangeSize;
    9287  ++inoutStats.unusedRangeCount;
    9288  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9289  }
    9290 
    9291  // 2. Process this allocation.
    9292  // There is allocation with suballoc.offset, suballoc.size.
    9293  ++inoutStats.allocationCount;
    9294 
    9295  // 3. Prepare for next iteration.
    9296  lastOffset = suballoc.offset + suballoc.size;
    9297  --nextAlloc2ndIndex;
    9298  }
    9299  // We are at the end.
    9300  else
    9301  {
    9302  if(lastOffset < size)
    9303  {
    9304  // There is free space from lastOffset to size.
    9305  const VkDeviceSize unusedRangeSize = size - lastOffset;
    9306  inoutStats.unusedSize += unusedRangeSize;
    9307  ++inoutStats.unusedRangeCount;
    9308  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9309  }
    9310 
    9311  // End of loop.
    9312  lastOffset = size;
    9313  }
    9314  }
    9315  }
    9316 }
    9317 
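// NOTE (illustrative usage sketch, not part of vk_mem_alloc.h): AddPoolStats()
// is the per-block contribution behind the public vmaGetPoolStats() query,
// assuming a valid `allocator` and `pool` created elsewhere:
//
//   VmaPoolStats poolStats = {};
//   vmaGetPoolStats(allocator, pool, &poolStats);
//   printf("pool: %llu of %llu bytes unused in %llu ranges (largest %llu)\n",
//       (unsigned long long)poolStats.unusedSize,
//       (unsigned long long)poolStats.size,
//       (unsigned long long)poolStats.unusedRangeCount,
//       (unsigned long long)poolStats.unusedRangeSizeMax);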
    9318 #if VMA_STATS_STRING_ENABLED
    9319 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
    9320 {
    9321  const VkDeviceSize size = GetSize();
    9322  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9323  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9324  const size_t suballoc1stCount = suballocations1st.size();
    9325  const size_t suballoc2ndCount = suballocations2nd.size();
    9326 
    9327  // FIRST PASS
    9328 
    9329  size_t unusedRangeCount = 0;
    9330  VkDeviceSize usedBytes = 0;
    9331 
    9332  VkDeviceSize lastOffset = 0;
    9333 
    9334  size_t alloc2ndCount = 0;
    9335  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    9336  {
    9337  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    9338  size_t nextAlloc2ndIndex = 0;
    9339  while(lastOffset < freeSpace2ndTo1stEnd)
    9340  {
    9341  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9342  while(nextAlloc2ndIndex < suballoc2ndCount &&
    9343  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9344  {
    9345  ++nextAlloc2ndIndex;
    9346  }
    9347 
    9348  // Found non-null allocation.
    9349  if(nextAlloc2ndIndex < suballoc2ndCount)
    9350  {
    9351  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9352 
    9353  // 1. Process free space before this allocation.
    9354  if(lastOffset < suballoc.offset)
    9355  {
    9356  // There is free space from lastOffset to suballoc.offset.
    9357  ++unusedRangeCount;
    9358  }
    9359 
    9360  // 2. Process this allocation.
    9361  // There is allocation with suballoc.offset, suballoc.size.
    9362  ++alloc2ndCount;
    9363  usedBytes += suballoc.size;
    9364 
    9365  // 3. Prepare for next iteration.
    9366  lastOffset = suballoc.offset + suballoc.size;
    9367  ++nextAlloc2ndIndex;
    9368  }
    9369  // We are at the end.
    9370  else
    9371  {
    9372  if(lastOffset < freeSpace2ndTo1stEnd)
    9373  {
    9374  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    9375  ++unusedRangeCount;
    9376  }
    9377 
    9378  // End of loop.
    9379  lastOffset = freeSpace2ndTo1stEnd;
    9380  }
    9381  }
    9382  }
    9383 
    9384  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    9385  size_t alloc1stCount = 0;
    9386  const VkDeviceSize freeSpace1stTo2ndEnd =
    9387  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    9388  while(lastOffset < freeSpace1stTo2ndEnd)
    9389  {
    9390  // Find next non-null allocation or move nextAlloc1stIndex to the end.
    9391  while(nextAlloc1stIndex < suballoc1stCount &&
    9392  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    9393  {
    9394  ++nextAlloc1stIndex;
    9395  }
    9396 
    9397  // Found non-null allocation.
    9398  if(nextAlloc1stIndex < suballoc1stCount)
    9399  {
    9400  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    9401 
    9402  // 1. Process free space before this allocation.
    9403  if(lastOffset < suballoc.offset)
    9404  {
    9405  // There is free space from lastOffset to suballoc.offset.
    9406  ++unusedRangeCount;
    9407  }
    9408 
    9409  // 2. Process this allocation.
    9410  // There is allocation with suballoc.offset, suballoc.size.
    9411  ++alloc1stCount;
    9412  usedBytes += suballoc.size;
    9413 
    9414  // 3. Prepare for next iteration.
    9415  lastOffset = suballoc.offset + suballoc.size;
    9416  ++nextAlloc1stIndex;
    9417  }
    9418  // We are at the end.
    9419  else
    9420  {
    9421  if(lastOffset < freeSpace1stTo2ndEnd)
    9422  {
    9423  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    9424  ++unusedRangeCount;
    9425  }
    9426 
    9427  // End of loop.
    9428  lastOffset = freeSpace1stTo2ndEnd;
    9429  }
    9430  }
    9431 
    9432  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    9433  {
    9434  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    9435  while(lastOffset < size)
    9436  {
    9437  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9438  while(nextAlloc2ndIndex != SIZE_MAX &&
    9439  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9440  {
    9441  --nextAlloc2ndIndex;
    9442  }
    9443 
    9444  // Found non-null allocation.
    9445  if(nextAlloc2ndIndex != SIZE_MAX)
    9446  {
    9447  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9448 
    9449  // 1. Process free space before this allocation.
    9450  if(lastOffset < suballoc.offset)
    9451  {
    9452  // There is free space from lastOffset to suballoc.offset.
    9453  ++unusedRangeCount;
    9454  }
    9455 
    9456  // 2. Process this allocation.
    9457  // There is allocation with suballoc.offset, suballoc.size.
    9458  ++alloc2ndCount;
    9459  usedBytes += suballoc.size;
    9460 
    9461  // 3. Prepare for next iteration.
    9462  lastOffset = suballoc.offset + suballoc.size;
    9463  --nextAlloc2ndIndex;
    9464  }
    9465  // We are at the end.
    9466  else
    9467  {
    9468  if(lastOffset < size)
    9469  {
    9470  // There is free space from lastOffset to size.
    9471  ++unusedRangeCount;
    9472  }
    9473 
    9474  // End of loop.
    9475  lastOffset = size;
    9476  }
    9477  }
    9478  }
    9479 
    9480  const VkDeviceSize unusedBytes = size - usedBytes;
    9481  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
    9482 
    9483  // SECOND PASS
    9484  lastOffset = 0;
    9485 
    9486  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    9487  {
    9488  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    9489  size_t nextAlloc2ndIndex = 0;
    9490  while(lastOffset < freeSpace2ndTo1stEnd)
    9491  {
    9492  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9493  while(nextAlloc2ndIndex < suballoc2ndCount &&
    9494  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9495  {
    9496  ++nextAlloc2ndIndex;
    9497  }
    9498 
    9499  // Found non-null allocation.
    9500  if(nextAlloc2ndIndex < suballoc2ndCount)
    9501  {
    9502  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9503 
    9504  // 1. Process free space before this allocation.
    9505  if(lastOffset < suballoc.offset)
    9506  {
    9507  // There is free space from lastOffset to suballoc.offset.
    9508  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9509  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    9510  }
    9511 
    9512  // 2. Process this allocation.
    9513  // There is allocation with suballoc.offset, suballoc.size.
    9514  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    9515 
    9516  // 3. Prepare for next iteration.
    9517  lastOffset = suballoc.offset + suballoc.size;
    9518  ++nextAlloc2ndIndex;
    9519  }
    9520  // We are at the end.
    9521  else
    9522  {
    9523  if(lastOffset < freeSpace2ndTo1stEnd)
    9524  {
    9525  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    9526  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    9527  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    9528  }
    9529 
    9530  // End of loop.
    9531  lastOffset = freeSpace2ndTo1stEnd;
    9532  }
    9533  }
    9534  }
    9535 
    9536  nextAlloc1stIndex = m_1stNullItemsBeginCount;
    9537  while(lastOffset < freeSpace1stTo2ndEnd)
    9538  {
    9539  // Find next non-null allocation or move nextAlloc1stIndex to the end.
    9540  while(nextAlloc1stIndex < suballoc1stCount &&
    9541  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    9542  {
    9543  ++nextAlloc1stIndex;
    9544  }
    9545 
    9546  // Found non-null allocation.
    9547  if(nextAlloc1stIndex < suballoc1stCount)
    9548  {
    9549  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    9550 
    9551  // 1. Process free space before this allocation.
    9552  if(lastOffset < suballoc.offset)
    9553  {
    9554  // There is free space from lastOffset to suballoc.offset.
    9555  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9556  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    9557  }
    9558 
    9559  // 2. Process this allocation.
    9560  // There is allocation with suballoc.offset, suballoc.size.
    9561  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    9562 
    9563  // 3. Prepare for next iteration.
    9564  lastOffset = suballoc.offset + suballoc.size;
    9565  ++nextAlloc1stIndex;
    9566  }
    9567  // We are at the end.
    9568  else
    9569  {
    9570  if(lastOffset < freeSpace1stTo2ndEnd)
    9571  {
    9572  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    9573  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    9574  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    9575  }
    9576 
    9577  // End of loop.
    9578  lastOffset = freeSpace1stTo2ndEnd;
    9579  }
    9580  }
    9581 
    9582  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    9583  {
    9584  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    9585  while(lastOffset < size)
    9586  {
    9587  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9588  while(nextAlloc2ndIndex != SIZE_MAX &&
    9589  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9590  {
    9591  --nextAlloc2ndIndex;
    9592  }
    9593 
    9594  // Found non-null allocation.
    9595  if(nextAlloc2ndIndex != SIZE_MAX)
    9596  {
    9597  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9598 
    9599  // 1. Process free space before this allocation.
    9600  if(lastOffset < suballoc.offset)
    9601  {
    9602  // There is free space from lastOffset to suballoc.offset.
    9603  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9604  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    9605  }
    9606 
    9607  // 2. Process this allocation.
    9608  // There is allocation with suballoc.offset, suballoc.size.
    9609  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    9610 
    9611  // 3. Prepare for next iteration.
    9612  lastOffset = suballoc.offset + suballoc.size;
    9613  --nextAlloc2ndIndex;
    9614  }
    9615  // We are at the end.
    9616  else
    9617  {
    9618  if(lastOffset < size)
    9619  {
    9620  // There is free space from lastOffset to size.
    9621  const VkDeviceSize unusedRangeSize = size - lastOffset;
    9622  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    9623  }
    9624 
    9625  // End of loop.
    9626  lastOffset = size;
    9627  }
    9628  }
    9629  }
    9630 
    9631  PrintDetailedMap_End(json);
    9632 }
    9633 #endif // #if VMA_STATS_STRING_ENABLED
    9634 
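// NOTE (illustrative usage sketch, not part of vk_mem_alloc.h):
// PrintDetailedMap() is reached through vmaBuildStatsString() when
// detailedMap == VK_TRUE; the file name below is a hypothetical example:
//
//   char* statsJson = nullptr;
//   vmaBuildStatsString(allocator, &statsJson, VK_TRUE); // VK_TRUE => per-allocation detail
//   FILE* file = fopen("vma_stats.json", "w");
//   if(file) { fputs(statsJson, file); fclose(file); }
//   vmaFreeStatsString(allocator, statsJson);
//
// The function runs two passes because PrintDetailedMap_Begin() must be given
// the totals (unused bytes, allocation and unused-range counts) before any
// per-range entry is written.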
    9635 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
    9636  uint32_t currentFrameIndex,
    9637  uint32_t frameInUseCount,
    9638  VkDeviceSize bufferImageGranularity,
    9639  VkDeviceSize allocSize,
    9640  VkDeviceSize allocAlignment,
    9641  bool upperAddress,
    9642  VmaSuballocationType allocType,
    9643  bool canMakeOtherLost,
    9644  uint32_t strategy,
    9645  VmaAllocationRequest* pAllocationRequest)
    9646 {
    9647  VMA_ASSERT(allocSize > 0);
    9648  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    9649  VMA_ASSERT(pAllocationRequest != VMA_NULL);
    9650  VMA_HEAVY_ASSERT(Validate());
    9651 
    9652  const VkDeviceSize size = GetSize();
    9653  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9654  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9655 
    9656  if(upperAddress)
    9657  {
    9658  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    9659  {
    9660  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
    9661  return false;
    9662  }
    9663 
    9664  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
    9665  if(allocSize > size)
    9666  {
    9667  return false;
    9668  }
    9669  VkDeviceSize resultBaseOffset = size - allocSize;
    9670  if(!suballocations2nd.empty())
    9671  {
    9672  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
    9673  resultBaseOffset = lastSuballoc.offset - allocSize;
    9674  if(allocSize > lastSuballoc.offset)
    9675  {
    9676  return false;
    9677  }
    9678  }
    9679 
    9680  // Start from offset equal to end of free space.
    9681  VkDeviceSize resultOffset = resultBaseOffset;
    9682 
    9683  // Apply VMA_DEBUG_MARGIN at the end.
    9684  if(VMA_DEBUG_MARGIN > 0)
    9685  {
    9686  if(resultOffset < VMA_DEBUG_MARGIN)
    9687  {
    9688  return false;
    9689  }
    9690  resultOffset -= VMA_DEBUG_MARGIN;
    9691  }
    9692 
    9693  // Apply alignment.
    9694  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
    9695 
    9696  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
    9697  // Make bigger alignment if necessary.
    9698  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
    9699  {
    9700  bool bufferImageGranularityConflict = false;
    9701  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
    9702  {
    9703  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
    9704  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    9705  {
    9706  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
    9707  {
    9708  bufferImageGranularityConflict = true;
    9709  break;
    9710  }
    9711  }
    9712  else
    9713  // Already on previous page.
    9714  break;
    9715  }
    9716  if(bufferImageGranularityConflict)
    9717  {
    9718  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
    9719  }
    9720  }
    9721 
    9722  // There is enough free space.
    9723  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
    9724  suballocations1st.back().offset + suballocations1st.back().size :
    9725  0;
    9726  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
    9727  {
    9728  // Check previous suballocations for BufferImageGranularity conflicts.
    9729  // If conflict exists, allocation cannot be made here.
    9730  if(bufferImageGranularity > 1)
    9731  {
    9732  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
    9733  {
    9734  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
    9735  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
    9736  {
    9737  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
    9738  {
    9739  return false;
    9740  }
    9741  }
    9742  else
    9743  {
    9744  // Already on next page.
    9745  break;
    9746  }
    9747  }
    9748  }
    9749 
    9750  // All tests passed: Success.
    9751  pAllocationRequest->offset = resultOffset;
    9752  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
    9753  pAllocationRequest->sumItemSize = 0;
    9754  // pAllocationRequest->item unused.
    9755  pAllocationRequest->itemsToMakeLostCount = 0;
    9756  return true;
    9757  }
    9758  }
    9759  else // !upperAddress
    9760  {
    9761  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    9762  {
    9763  // Try to allocate at the end of 1st vector.
    9764 
    9765  VkDeviceSize resultBaseOffset = 0;
    9766  if(!suballocations1st.empty())
    9767  {
    9768  const VmaSuballocation& lastSuballoc = suballocations1st.back();
    9769  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
    9770  }
    9771 
    9772  // Start from offset equal to beginning of free space.
    9773  VkDeviceSize resultOffset = resultBaseOffset;
    9774 
    9775  // Apply VMA_DEBUG_MARGIN at the beginning.
    9776  if(VMA_DEBUG_MARGIN > 0)
    9777  {
    9778  resultOffset += VMA_DEBUG_MARGIN;
    9779  }
    9780 
    9781  // Apply alignment.
    9782  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
    9783 
    9784  // Check previous suballocations for BufferImageGranularity conflicts.
    9785  // Make bigger alignment if necessary.
    9786  if(bufferImageGranularity > 1 && !suballocations1st.empty())
    9787  {
    9788  bool bufferImageGranularityConflict = false;
    9789  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
    9790  {
    9791  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
    9792  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
    9793  {
    9794  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    9795  {
    9796  bufferImageGranularityConflict = true;
    9797  break;
    9798  }
    9799  }
    9800  else
    9801  // Already on previous page.
    9802  break;
    9803  }
    9804  if(bufferImageGranularityConflict)
    9805  {
    9806  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
    9807  }
    9808  }
    9809 
    9810  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
    9811  suballocations2nd.back().offset : size;
    9812 
    9813  // There is enough free space at the end after alignment.
    9814  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
    9815  {
    9816  // Check next suballocations for BufferImageGranularity conflicts.
    9817  // If conflict exists, allocation cannot be made here.
    9818  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    9819  {
    9820  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
    9821  {
    9822  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
    9823  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    9824  {
    9825  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    9826  {
    9827  return false;
    9828  }
    9829  }
    9830  else
    9831  {
    9832  // Already on previous page.
    9833  break;
    9834  }
    9835  }
    9836  }
    9837 
    9838  // All tests passed: Success.
    9839  pAllocationRequest->offset = resultOffset;
    9840  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
    9841  pAllocationRequest->sumItemSize = 0;
    9842  // pAllocationRequest->item unused.
    9843  pAllocationRequest->itemsToMakeLostCount = 0;
    9844  return true;
    9845  }
    9846  }
    9847 
    9848  // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
    9849  // beginning of 1st vector as the end of free space.
    9850  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    9851  {
    9852  VMA_ASSERT(!suballocations1st.empty());
    9853 
    9854  VkDeviceSize resultBaseOffset = 0;
    9855  if(!suballocations2nd.empty())
    9856  {
    9857  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
    9858  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
    9859  }
    9860 
    9861  // Start from offset equal to beginning of free space.
    9862  VkDeviceSize resultOffset = resultBaseOffset;
    9863 
    9864  // Apply VMA_DEBUG_MARGIN at the beginning.
    9865  if(VMA_DEBUG_MARGIN > 0)
    9866  {
    9867  resultOffset += VMA_DEBUG_MARGIN;
    9868  }
    9869 
    9870  // Apply alignment.
    9871  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
    9872 
    9873  // Check previous suballocations for BufferImageGranularity conflicts.
    9874  // Make bigger alignment if necessary.
    9875  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
    9876  {
    9877  bool bufferImageGranularityConflict = false;
    9878  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
    9879  {
    9880  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
    9881  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
    9882  {
    9883  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    9884  {
    9885  bufferImageGranularityConflict = true;
    9886  break;
    9887  }
    9888  }
    9889  else
    9890  // Already on previous page.
    9891  break;
    9892  }
    9893  if(bufferImageGranularityConflict)
    9894  {
    9895  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
    9896  }
    9897  }
    9898 
    9899  pAllocationRequest->itemsToMakeLostCount = 0;
    9900  pAllocationRequest->sumItemSize = 0;
    9901  size_t index1st = m_1stNullItemsBeginCount;
    9902 
    9903  if(canMakeOtherLost)
    9904  {
    9905  while(index1st < suballocations1st.size() &&
    9906  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
    9907  {
    9908  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
    9909  const VmaSuballocation& suballoc = suballocations1st[index1st];
    9910  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
    9911  {
    9912  // No problem.
    9913  }
    9914  else
    9915  {
    9916  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
    9917  if(suballoc.hAllocation->CanBecomeLost() &&
    9918  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    9919  {
    9920  ++pAllocationRequest->itemsToMakeLostCount;
    9921  pAllocationRequest->sumItemSize += suballoc.size;
    9922  }
    9923  else
    9924  {
    9925  return false;
    9926  }
    9927  }
    9928  ++index1st;
    9929  }
    9930 
    9931  // Check next suballocations for BufferImageGranularity conflicts.
    9932  // If conflict exists, we must mark more allocations lost or fail.
    9933  if(bufferImageGranularity > 1)
    9934  {
    9935  while(index1st < suballocations1st.size())
    9936  {
    9937  const VmaSuballocation& suballoc = suballocations1st[index1st];
    9938  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
    9939  {
    9940  if(suballoc.hAllocation != VK_NULL_HANDLE)
    9941  {
    9942  // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
    9943  if(suballoc.hAllocation->CanBecomeLost() &&
    9944  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    9945  {
    9946  ++pAllocationRequest->itemsToMakeLostCount;
    9947  pAllocationRequest->sumItemSize += suballoc.size;
    9948  }
    9949  else
    9950  {
    9951  return false;
    9952  }
    9953  }
    9954  }
    9955  else
    9956  {
    9957  // Already on next page.
    9958  break;
    9959  }
    9960  ++index1st;
    9961  }
    9962  }
    9963  }
    9964 
    9965  // There is enough free space at the end after alignment.
    9966  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN < size) ||
    9967  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
    9968  {
    9969  // Check next suballocations for BufferImageGranularity conflicts.
    9970  // If conflict exists, allocation cannot be made here.
    9971  if(bufferImageGranularity > 1)
    9972  {
    9973  for(size_t nextSuballocIndex = index1st;
    9974  nextSuballocIndex < suballocations1st.size();
    9975  nextSuballocIndex++)
    9976  {
    9977  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
    9978  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    9979  {
    9980  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    9981  {
    9982  return false;
    9983  }
    9984  }
    9985  else
    9986  {
    9987  // Already on next page.
    9988  break;
    9989  }
    9990  }
    9991  }
    9992 
    9993  // All tests passed: Success.
    9994  pAllocationRequest->offset = resultOffset;
    9995  pAllocationRequest->sumFreeSize =
    9996  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
    9997  - resultBaseOffset
    9998  - pAllocationRequest->sumItemSize;
    9999  // pAllocationRequest->item unused.
    10000  return true;
    10001  }
    10002  }
    10003  }
    10004 
    10005  return false;
    10006 }
    10007 
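// NOTE (illustrative sketch, not part of vk_mem_alloc.h): the upperAddress
// path above services VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT inside a pool
// created with VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT. A double-stack setup
// might look like this; `memTypeIndex` is a placeholder chosen elsewhere:
//
//   VmaPoolCreateInfo poolInfo = {};
//   poolInfo.memoryTypeIndex = memTypeIndex;
//   poolInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
//   poolInfo.blockSize = 64ull * 1024 * 1024;
//   poolInfo.maxBlockCount = 1; // the linear algorithm manages a single block
//   VmaPool pool;
//   VkResult res = vmaCreatePool(allocator, &poolInfo, &pool);
//
//   VmaAllocationCreateInfo allocInfo = {};
//   allocInfo.pool = pool;
//   allocInfo.flags = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT; // grow from the top (2nd vector)
//   // Without the flag, allocations grow from the bottom (1st vector).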
    10008 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
    10009  uint32_t currentFrameIndex,
    10010  uint32_t frameInUseCount,
    10011  VmaAllocationRequest* pAllocationRequest)
    10012 {
    10013  if(pAllocationRequest->itemsToMakeLostCount == 0)
    10014  {
    10015  return true;
    10016  }
    10017 
    10018  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
    10019 
    10020  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    10021  size_t index1st = m_1stNullItemsBeginCount;
    10022  size_t madeLostCount = 0;
    10023  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
    10024  {
    10025  VMA_ASSERT(index1st < suballocations1st.size());
    10026  VmaSuballocation& suballoc = suballocations1st[index1st];
    10027  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    10028  {
    10029  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
    10030  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
    10031  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    10032  {
    10033  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    10034  suballoc.hAllocation = VK_NULL_HANDLE;
    10035  m_SumFreeSize += suballoc.size;
    10036  ++m_1stNullItemsMiddleCount;
    10037  ++madeLostCount;
    10038  }
    10039  else
    10040  {
    10041  return false;
    10042  }
    10043  }
    10044  ++index1st;
    10045  }
    10046 
    10047  CleanupAfterFree();
    10048  //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().
    10049 
    10050  return true;
    10051 }
    10052 
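// NOTE (illustrative sketch, not part of vk_mem_alloc.h): the lost-allocation
// machinery above only applies to allocations created with
// VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT, and a new request may evict them
// when it carries VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT. Typical
// per-frame usage, assuming `allocator`, `frameIndex` and `alloc` exist:
//
//   vmaSetCurrentFrameIndex(allocator, frameIndex);
//   if(vmaTouchAllocation(allocator, alloc) == VK_FALSE)
//   {
//       // Allocation was made lost by a later request: recreate its contents.
//   }
//
// frameInUseCount (VmaAllocatorCreateInfo / VmaPoolCreateInfo) is the number
// of recent frames during which an allocation still counts as in use and
// therefore cannot be made lost.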
    10053 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    10054 {
    10055  uint32_t lostAllocationCount = 0;
    10056 
    10057  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    10058  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    10059  {
    10060  VmaSuballocation& suballoc = suballocations1st[i];
    10061  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    10062  suballoc.hAllocation->CanBecomeLost() &&
    10063  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    10064  {
    10065  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    10066  suballoc.hAllocation = VK_NULL_HANDLE;
    10067  ++m_1stNullItemsMiddleCount;
    10068  m_SumFreeSize += suballoc.size;
    10069  ++lostAllocationCount;
    10070  }
    10071  }
    10072 
    10073  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    10074  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    10075  {
    10076  VmaSuballocation& suballoc = suballocations2nd[i];
    10077  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    10078  suballoc.hAllocation->CanBecomeLost() &&
    10079  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    10080  {
    10081  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    10082  suballoc.hAllocation = VK_NULL_HANDLE;
    10083  ++m_2ndNullItemsCount;
    10084  ++lostAllocationCount;
    10085  }
    10086  }
    10087 
    10088  if(lostAllocationCount)
    10089  {
    10090  CleanupAfterFree();
    10091  }
    10092 
    10093  return lostAllocationCount;
    10094 }
    10095 
    10096 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
    10097 {
    10098  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    10099  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    10100  {
    10101  const VmaSuballocation& suballoc = suballocations1st[i];
    10102  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    10103  {
    10104  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    10105  {
    10106  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    10107  return VK_ERROR_VALIDATION_FAILED_EXT;
    10108  }
    10109  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    10110  {
    10111  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    10112  return VK_ERROR_VALIDATION_FAILED_EXT;
    10113  }
    10114  }
    10115  }
    10116 
    10117  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    10118  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    10119  {
    10120  const VmaSuballocation& suballoc = suballocations2nd[i];
    10121  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    10122  {
    10123  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    10124  {
    10125  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    10126  return VK_ERROR_VALIDATION_FAILED_EXT;
    10127  }
    10128  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    10129  {
    10130  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    10131  return VK_ERROR_VALIDATION_FAILED_EXT;
    10132  }
    10133  }
    10134  }
    10135 
    10136  return VK_SUCCESS;
    10137 }
    10138 
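// NOTE (illustrative sketch, not part of vk_mem_alloc.h): CheckCorruption()
// verifies the magic values written into the margins around allocations, which
// exist only when the library is compiled with a nonzero VMA_DEBUG_MARGIN and
// VMA_DEBUG_DETECT_CORRUPTION enabled:
//
//   // In the translation unit that defines VMA_IMPLEMENTATION:
//   // #define VMA_DEBUG_MARGIN 16
//   // #define VMA_DEBUG_DETECT_CORRUPTION 1
//
//   VkResult res = vmaCheckCorruption(allocator, UINT32_MAX); // all memory types
//   // VK_SUCCESS                     => all margins intact
//   // VK_ERROR_VALIDATION_FAILED_EXT => an overwrite was detected (asserts above fire)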
    10139 void VmaBlockMetadata_Linear::Alloc(
    10140  const VmaAllocationRequest& request,
    10141  VmaSuballocationType type,
    10142  VkDeviceSize allocSize,
    10143  bool upperAddress,
    10144  VmaAllocation hAllocation)
    10145 {
    10146  const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
    10147 
    10148  if(upperAddress)
    10149  {
    10150  VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
    10151  "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
    10152  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    10153  suballocations2nd.push_back(newSuballoc);
    10154  m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
    10155  }
    10156  else
    10157  {
    10158  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    10159 
    10160  // First allocation.
    10161  if(suballocations1st.empty())
    10162  {
    10163  suballocations1st.push_back(newSuballoc);
    10164  }
    10165  else
    10166  {
    10167  // New allocation at the end of 1st vector.
    10168  if(request.offset >= suballocations1st.back().offset + suballocations1st.back().size)
    10169  {
    10170  // Check if it fits before the end of the block.
    10171  VMA_ASSERT(request.offset + allocSize <= GetSize());
    10172  suballocations1st.push_back(newSuballoc);
    10173  }
    10174  // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
    10175  else if(request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset)
    10176  {
    10177  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    10178 
    10179  switch(m_2ndVectorMode)
    10180  {
    10181  case SECOND_VECTOR_EMPTY:
    10182  // First allocation from second part ring buffer.
    10183  VMA_ASSERT(suballocations2nd.empty());
    10184  m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
    10185  break;
    10186  case SECOND_VECTOR_RING_BUFFER:
    10187  // 2-part ring buffer is already started.
    10188  VMA_ASSERT(!suballocations2nd.empty());
    10189  break;
    10190  case SECOND_VECTOR_DOUBLE_STACK:
    10191  VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
    10192  break;
    10193  default:
    10194  VMA_ASSERT(0);
    10195  }
    10196 
    10197  suballocations2nd.push_back(newSuballoc);
    10198  }
    10199  else
    10200  {
    10201  VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
    10202  }
    10203  }
    10204  }
    10205 
    10206  m_SumFreeSize -= newSuballoc.size;
    10207 }
    10208 
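// NOTE (summary, not part of vk_mem_alloc.h): Alloc() places the new
// suballocation in one of three spots, mirroring CreateAllocationRequest():
//
//   1. Past the end of the 1st vector        -> plain stack growth.
//   2. Below the first used item of the 1st  -> wrapped part of a ring buffer
//                                               (2nd vector, SECOND_VECTOR_RING_BUFFER).
//   3. upperAddress == true                  -> top-down stack
//                                               (2nd vector, SECOND_VECTOR_DOUBLE_STACK).
//
// Any other offset means the request no longer matches the metadata, hence the
// "CRITICAL INTERNAL ERROR" assert.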
    10209 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
    10210 {
    10211  FreeAtOffset(allocation->GetOffset());
    10212 }
    10213 
    10214 void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
    10215 {
    10216  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    10217  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    10218 
    10219  if(!suballocations1st.empty())
    10220  {
    10221  // Oldest allocation at the beginning of the 1st vector: mark it as the next null item.
    10222  VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
    10223  if(firstSuballoc.offset == offset)
    10224  {
    10225  firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    10226  firstSuballoc.hAllocation = VK_NULL_HANDLE;
    10227  m_SumFreeSize += firstSuballoc.size;
    10228  ++m_1stNullItemsBeginCount;
    10229  CleanupAfterFree();
    10230  return;
    10231  }
    10232  }
    10233 
    10234  // Last allocation in 2-part ring buffer or top of upper stack (same logic).
    10235  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
    10236  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    10237  {
    10238  VmaSuballocation& lastSuballoc = suballocations2nd.back();
    10239  if(lastSuballoc.offset == offset)
    10240  {
    10241  m_SumFreeSize += lastSuballoc.size;
    10242  suballocations2nd.pop_back();
    10243  CleanupAfterFree();
    10244  return;
    10245  }
    10246  }
    10247  // Last allocation in 1st vector.
    10248  else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
    10249  {
    10250  VmaSuballocation& lastSuballoc = suballocations1st.back();
    10251  if(lastSuballoc.offset == offset)
    10252  {
    10253  m_SumFreeSize += lastSuballoc.size;
    10254  suballocations1st.pop_back();
    10255  CleanupAfterFree();
    10256  return;
    10257  }
    10258  }
    10259 
    10260  // Item from the middle of 1st vector.
    10261  {
    10262  VmaSuballocation refSuballoc;
    10263  refSuballoc.offset = offset;
    10264  // Rest of members stays uninitialized intentionally for better performance.
    10265  SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
    10266  suballocations1st.begin() + m_1stNullItemsBeginCount,
    10267  suballocations1st.end(),
    10268  refSuballoc);
    10269  if(it != suballocations1st.end())
    10270  {
    10271  it->type = VMA_SUBALLOCATION_TYPE_FREE;
    10272  it->hAllocation = VK_NULL_HANDLE;
    10273  ++m_1stNullItemsMiddleCount;
    10274  m_SumFreeSize += it->size;
    10275  CleanupAfterFree();
    10276  return;
    10277  }
    10278  }
    10279 
    10280  if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
    10281  {
    10282  // Item from the middle of 2nd vector.
    10283  VmaSuballocation refSuballoc;
    10284  refSuballoc.offset = offset;
    10285  // Rest of members stays uninitialized intentionally for better performance.
    10286  SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
    10287  VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
    10288  VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
    10289  if(it != suballocations2nd.end())
    10290  {
    10291  it->type = VMA_SUBALLOCATION_TYPE_FREE;
    10292  it->hAllocation = VK_NULL_HANDLE;
    10293  ++m_2ndNullItemsCount;
    10294  m_SumFreeSize += it->size;
    10295  CleanupAfterFree();
    10296  return;
    10297  }
    10298  }
    10299 
    10300  VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
    10301 }
    10302 
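// NOTE (summary, not part of vk_mem_alloc.h): FreeAtOffset() tries the O(1)
// fast paths first - the oldest item at the beginning of the 1st vector, then
// the newest item on top of the 2nd (or 1st) vector - and only falls back to
// binary search via VmaVectorFindSorted for items freed from the middle.
// Freeing in FIFO or LIFO order, the intended pattern for the linear
// algorithm, therefore never pays for a search.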
    10303 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
    10304 {
    10305  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
    10306  const size_t suballocCount = AccessSuballocations1st().size();
    10307  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
    10308 }
    10309 
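// NOTE (worked example, not part of vk_mem_alloc.h): the condition above,
// nullItemCount * 2 >= (suballocCount - nullItemCount) * 3, is equivalent to
// nullItemCount >= 1.5 * nonNullItemCount. With 100 items of which 61 are
// null: 61*2 = 122 >= 39*3 = 117, so CleanupAfterFree() compacts; with 59
// null: 118 < 123, so it does not. The suballocCount > 32 guard keeps small
// vectors from compacting over and over.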
    10310 void VmaBlockMetadata_Linear::CleanupAfterFree()
    10311 {
    10312  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    10313  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    10314 
    10315  if(IsEmpty())
    10316  {
    10317  suballocations1st.clear();
    10318  suballocations2nd.clear();
    10319  m_1stNullItemsBeginCount = 0;
    10320  m_1stNullItemsMiddleCount = 0;
    10321  m_2ndNullItemsCount = 0;
    10322  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    10323  }
    10324  else
    10325  {
    10326  const size_t suballoc1stCount = suballocations1st.size();
    10327  const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
    10328  VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
    10329 
    10330  // Find more null items at the beginning of 1st vector.
    10331  while(m_1stNullItemsBeginCount < suballoc1stCount &&
    10332  suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
    10333  {
    10334  ++m_1stNullItemsBeginCount;
    10335  --m_1stNullItemsMiddleCount;
    10336  }
    10337 
    10338  // Find more null items at the end of 1st vector.
    10339  while(m_1stNullItemsMiddleCount > 0 &&
    10340  suballocations1st.back().hAllocation == VK_NULL_HANDLE)
    10341  {
    10342  --m_1stNullItemsMiddleCount;
    10343  suballocations1st.pop_back();
    10344  }
    10345 
    10346  // Find more null items at the end of 2nd vector.
    10347  while(m_2ndNullItemsCount > 0 &&
    10348  suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
    10349  {
    10350  --m_2ndNullItemsCount;
    10351  suballocations2nd.pop_back();
    10352  }
    10353 
    10354  if(ShouldCompact1st())
    10355  {
    10356  const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
    10357  size_t srcIndex = m_1stNullItemsBeginCount;
    10358  for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
    10359  {
    10360  while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
    10361  {
    10362  ++srcIndex;
    10363  }
    10364  if(dstIndex != srcIndex)
    10365  {
    10366  suballocations1st[dstIndex] = suballocations1st[srcIndex];
    10367  }
    10368  ++srcIndex;
    10369  }
    10370  suballocations1st.resize(nonNullItemCount);
    10371  m_1stNullItemsBeginCount = 0;
    10372  m_1stNullItemsMiddleCount = 0;
    10373  }
    10374 
    10375  // 2nd vector became empty.
    10376  if(suballocations2nd.empty())
    10377  {
    10378  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    10379  }
    10380 
    10381  // 1st vector became empty.
    10382  if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
    10383  {
    10384  suballocations1st.clear();
    10385  m_1stNullItemsBeginCount = 0;
    10386 
    10387  if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    10388  {
    10389  // Swap 1st with 2nd. Now 2nd is empty.
    10390  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    10391  m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
    10392  while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
    10393  suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
    10394  {
    10395  ++m_1stNullItemsBeginCount;
    10396  --m_1stNullItemsMiddleCount;
    10397  }
    10398  m_2ndNullItemsCount = 0;
    10399  m_1stVectorIndex ^= 1;
    10400  }
    10401  }
    10402  }
    10403 
    10404  VMA_HEAVY_ASSERT(Validate());
    10405 }
    10406 
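// NOTE (summary, not part of vk_mem_alloc.h): the m_1stVectorIndex ^= 1 above
// is what keeps the ring buffer cheap: when the 1st vector drains while the
// 2nd still holds the wrapped allocations, the two vectors swap roles instead
// of moving elements, and the 2nd vector's null items are reclassified into
// the begin/middle counters of the new 1st vector.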
    10407 
    10408  ////////////////////////////////////////////////////////////////////////////////
    10409  // class VmaBlockMetadata_Buddy
    10410 
    10411 VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
    10412  VmaBlockMetadata(hAllocator),
    10413  m_Root(VMA_NULL),
    10414  m_AllocationCount(0),
    10415  m_FreeCount(1),
    10416  m_SumFreeSize(0)
    10417 {
    10418  memset(m_FreeList, 0, sizeof(m_FreeList));
    10419 }
    10420 
    10421 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
    10422 {
    10423  DeleteNode(m_Root);
    10424 }
    10425 
    10426 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
    10427 {
    10428  VmaBlockMetadata::Init(size);
    10429 
    10430  m_UsableSize = VmaPrevPow2(size);
    10431  m_SumFreeSize = m_UsableSize;
    10432 
    10433  // Calculate m_LevelCount.
    10434  m_LevelCount = 1;
    10435  while(m_LevelCount < MAX_LEVELS &&
    10436  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
    10437  {
    10438  ++m_LevelCount;
    10439  }
    10440 
    10441  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
    10442  rootNode->offset = 0;
    10443  rootNode->type = Node::TYPE_FREE;
    10444  rootNode->parent = VMA_NULL;
    10445  rootNode->buddy = VMA_NULL;
    10446 
    10447  m_Root = rootNode;
    10448  AddToFreeListFront(0, rootNode);
    10449 }
    10450 
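// NOTE (worked example, not part of vk_mem_alloc.h): Init() rounds the block
// size down to a power of two, so a 100 MiB block yields m_UsableSize = 64 MiB
// and the 36 MiB remainder is reported as unusable by the statistics functions
// below. Node sizes then halve per level: level 0 = 64 MiB, level 1 = 32 MiB,
// level 2 = 16 MiB, ... until MIN_NODE_SIZE or MAX_LEVELS ends the loop.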
    10451 bool VmaBlockMetadata_Buddy::Validate() const
    10452 {
    10453  // Validate tree.
    10454  ValidationContext ctx;
    10455  if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
    10456  {
    10457  VMA_VALIDATE(false && "ValidateNode failed.");
    10458  }
    10459  VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
    10460  VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
    10461 
    10462  // Validate free node lists.
    10463  for(uint32_t level = 0; level < m_LevelCount; ++level)
    10464  {
    10465  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
    10466  m_FreeList[level].front->free.prev == VMA_NULL);
    10467 
    10468  for(Node* node = m_FreeList[level].front;
    10469  node != VMA_NULL;
    10470  node = node->free.next)
    10471  {
    10472  VMA_VALIDATE(node->type == Node::TYPE_FREE);
    10473 
    10474  if(node->free.next == VMA_NULL)
    10475  {
    10476  VMA_VALIDATE(m_FreeList[level].back == node);
    10477  }
    10478  else
    10479  {
    10480  VMA_VALIDATE(node->free.next->free.prev == node);
    10481  }
    10482  }
    10483  }
    10484 
    10485  // Validate that free lists at higher levels are empty.
    10486  for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
    10487  {
    10488  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
    10489  }
    10490 
    10491  return true;
    10492 }
    10493 
    10494 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
    10495 {
    10496  for(uint32_t level = 0; level < m_LevelCount; ++level)
    10497  {
    10498  if(m_FreeList[level].front != VMA_NULL)
    10499  {
    10500  return LevelToNodeSize(level);
    10501  }
    10502  }
    10503  return 0;
    10504 }
    10505 
    10506 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    10507 {
    10508  const VkDeviceSize unusableSize = GetUnusableSize();
    10509 
    10510  outInfo.blockCount = 1;
    10511 
    10512  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
    10513  outInfo.usedBytes = outInfo.unusedBytes = 0;
    10514 
    10515  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
    10516  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
    10517  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
    10518 
    10519  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
    10520 
    10521  if(unusableSize > 0)
    10522  {
    10523  ++outInfo.unusedRangeCount;
    10524  outInfo.unusedBytes += unusableSize;
    10525  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
    10526  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
    10527  }
    10528 }
    10529 
    10530 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
    10531 {
    10532  const VkDeviceSize unusableSize = GetUnusableSize();
    10533 
    10534  inoutStats.size += GetSize();
    10535  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
    10536  inoutStats.allocationCount += m_AllocationCount;
    10537  inoutStats.unusedRangeCount += m_FreeCount;
    10538  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    10539 
    10540  if(unusableSize > 0)
    10541  {
    10542  ++inoutStats.unusedRangeCount;
    10543  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
    10544  }
    10545 }
    10546 
    10547 #if VMA_STATS_STRING_ENABLED
    10548 
    10549 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
    10550 {
    10551  // TODO optimize
    10552  VmaStatInfo stat;
    10553  CalcAllocationStatInfo(stat);
    10554 
    10555  PrintDetailedMap_Begin(
    10556  json,
    10557  stat.unusedBytes,
    10558  stat.allocationCount,
    10559  stat.unusedRangeCount);
    10560 
    10561  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
    10562 
    10563  const VkDeviceSize unusableSize = GetUnusableSize();
    10564  if(unusableSize > 0)
    10565  {
    10566  PrintDetailedMap_UnusedRange(json,
    10567  m_UsableSize, // offset
    10568  unusableSize); // size
    10569  }
    10570 
    10571  PrintDetailedMap_End(json);
    10572 }
    10573 
    10574 #endif // #if VMA_STATS_STRING_ENABLED
    10575 
    10576 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
    10577  uint32_t currentFrameIndex,
    10578  uint32_t frameInUseCount,
    10579  VkDeviceSize bufferImageGranularity,
    10580  VkDeviceSize allocSize,
    10581  VkDeviceSize allocAlignment,
    10582  bool upperAddress,
    10583  VmaSuballocationType allocType,
    10584  bool canMakeOtherLost,
    10585  uint32_t strategy,
    10586  VmaAllocationRequest* pAllocationRequest)
    10587 {
    10588  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
    10589 
    10590  // Simple way to respect bufferImageGranularity. May be optimized some day.
    10591  // Whenever the allocation might contain an OPTIMAL image, pad size and alignment up to the granularity:
    10592  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
    10593  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    10594  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
    10595  {
    10596  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
    10597  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
    10598  }
    10599 
    10600  if(allocSize > m_UsableSize)
    10601  {
    10602  return false;
    10603  }
    10604 
    10605  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    10606  for(uint32_t level = targetLevel + 1; level--; )
    10607  {
    10608  for(Node* freeNode = m_FreeList[level].front;
    10609  freeNode != VMA_NULL;
    10610  freeNode = freeNode->free.next)
    10611  {
    10612  if(freeNode->offset % allocAlignment == 0)
    10613  {
    10614  pAllocationRequest->offset = freeNode->offset;
    10615  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
    10616  pAllocationRequest->sumItemSize = 0;
    10617  pAllocationRequest->itemsToMakeLostCount = 0;
    10618  pAllocationRequest->customData = (void*)(uintptr_t)level;
    10619  return true;
    10620  }
    10621  }
    10622  }
    10623 
    10624  return false;
    10625 }
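// Note on the search above: it starts at targetLevel (the deepest level whose
// nodes still fit allocSize) and walks toward level 0, i.e. toward ever larger
// free nodes. The first suitably aligned free node wins; if it lies above
// targetLevel, Alloc() below splits it down to the requested size.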
    10626 
    10627 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
    10628  uint32_t currentFrameIndex,
    10629  uint32_t frameInUseCount,
    10630  VmaAllocationRequest* pAllocationRequest)
    10631 {
    10632  /*
    10633  Lost allocations are not supported in buddy allocator at the moment.
    10634  Support might be added in the future.
    10635  */
    10636  return pAllocationRequest->itemsToMakeLostCount == 0;
    10637 }
    10638 
    10639 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    10640 {
    10641  /*
    10642  Lost allocations are not supported in buddy allocator at the moment.
    10643  Support might be added in the future.
    10644  */
    10645  return 0;
    10646 }
    10647 
    10648 void VmaBlockMetadata_Buddy::Alloc(
    10649  const VmaAllocationRequest& request,
    10650  VmaSuballocationType type,
    10651  VkDeviceSize allocSize,
    10652  bool upperAddress,
    10653  VmaAllocation hAllocation)
    10654 {
    10655  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    10656  uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
    10657 
    10658  Node* currNode = m_FreeList[currLevel].front;
    10659  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    10660  while(currNode->offset != request.offset)
    10661  {
    10662  currNode = currNode->free.next;
    10663  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    10664  }
    10665 
    10666  // Go down, splitting free nodes.
    10667  while(currLevel < targetLevel)
    10668  {
    10669  // currNode is already first free node at currLevel.
    10670  // Remove it from list of free nodes at this currLevel.
    10671  RemoveFromFreeList(currLevel, currNode);
    10672 
    10673  const uint32_t childrenLevel = currLevel + 1;
    10674 
    10675  // Create two free sub-nodes.
    10676  Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
    10677  Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();
    10678 
    10679  leftChild->offset = currNode->offset;
    10680  leftChild->type = Node::TYPE_FREE;
    10681  leftChild->parent = currNode;
    10682  leftChild->buddy = rightChild;
    10683 
    10684  rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
    10685  rightChild->type = Node::TYPE_FREE;
    10686  rightChild->parent = currNode;
    10687  rightChild->buddy = leftChild;
    10688 
    10689  // Convert current currNode to split type.
    10690  currNode->type = Node::TYPE_SPLIT;
    10691  currNode->split.leftChild = leftChild;
    10692 
    10693  // Add child nodes to free list. Order is important!
    10694  AddToFreeListFront(childrenLevel, rightChild);
    10695  AddToFreeListFront(childrenLevel, leftChild);
    10696 
    10697  ++m_FreeCount;
    10698  //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
    10699  ++currLevel;
    10700  currNode = m_FreeList[currLevel].front;
    10701 
    10702  /*
    10703  We can be sure that currNode, as left child of node previously split,
    10704  also fulfills the alignment requirement.
    10705  */
    10706  }
    10707 
    10708  // Remove from free list.
    10709  VMA_ASSERT(currLevel == targetLevel &&
    10710  currNode != VMA_NULL &&
    10711  currNode->type == Node::TYPE_FREE);
    10712  RemoveFromFreeList(currLevel, currNode);
    10713 
    10714  // Convert to allocation node.
    10715  currNode->type = Node::TYPE_ALLOCATION;
    10716  currNode->allocation.alloc = hAllocation;
    10717 
    10718  ++m_AllocationCount;
    10719  --m_FreeCount;
    10720  m_SumFreeSize -= allocSize;
    10721 }
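// Sketch of the descent above, assuming a 256-unit block and a 64-unit request:
// the free root (level 0, size 256) is split into two 128-unit children, the left
// child is split again into two 64-unit children, and the final left child becomes
// the TYPE_ALLOCATION node. request.customData carries the level found by
// CreateAllocationRequest(), passed through the generic request struct as a uintptr_t.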
    10722 
    10723 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
    10724 {
    10725  if(node->type == Node::TYPE_SPLIT)
    10726  {
    10727  DeleteNode(node->split.leftChild->buddy);
    10728  DeleteNode(node->split.leftChild);
    10729  }
    10730 
    10731  vma_delete(GetAllocationCallbacks(), node);
    10732 }
    10733 
    10734 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
    10735 {
    10736  VMA_VALIDATE(level < m_LevelCount);
    10737  VMA_VALIDATE(curr->parent == parent);
    10738  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
    10739  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
    10740  switch(curr->type)
    10741  {
    10742  case Node::TYPE_FREE:
    10743  // curr->free.prev, next are validated separately.
    10744  ctx.calculatedSumFreeSize += levelNodeSize;
    10745  ++ctx.calculatedFreeCount;
    10746  break;
    10747  case Node::TYPE_ALLOCATION:
    10748  ++ctx.calculatedAllocationCount;
    10749  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
    10750  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
    10751  break;
    10752  case Node::TYPE_SPLIT:
    10753  {
    10754  const uint32_t childrenLevel = level + 1;
    10755  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
    10756  const Node* const leftChild = curr->split.leftChild;
    10757  VMA_VALIDATE(leftChild != VMA_NULL);
    10758  VMA_VALIDATE(leftChild->offset == curr->offset);
    10759  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
    10760  {
    10761  VMA_VALIDATE(false && "ValidateNode for left child failed.");
    10762  }
    10763  const Node* const rightChild = leftChild->buddy;
    10764  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
    10765  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
    10766  {
    10767  VMA_VALIDATE(false && "ValidateNode for right child failed.");
    10768  }
    10769  }
    10770  break;
    10771  default:
    10772  return false;
    10773  }
    10774 
    10775  return true;
    10776 }
    10777 
    10778 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
    10779 {
    10780  // I know this could be optimized somehow, e.g. by using std::bit_width() (proposed as std::log2p1) from C++20.
    10781  uint32_t level = 0;
    10782  VkDeviceSize currLevelNodeSize = m_UsableSize;
    10783  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
    10784  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
    10785  {
    10786  ++level;
    10787  currLevelNodeSize = nextLevelNodeSize;
    10788  nextLevelNodeSize = currLevelNodeSize >> 1;
    10789  }
    10790  return level;
    10791 }
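// Worked example, assuming m_UsableSize = 256 and m_LevelCount = 4: for
// allocSize = 40 the loop stops once nextLevelNodeSize = 32 < 40 and returns
// level 2 (node size 64) - the deepest level whose nodes still fit the request.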
    10792 
    10793 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
    10794 {
    10795  // Find node and level.
    10796  Node* node = m_Root;
    10797  VkDeviceSize nodeOffset = 0;
    10798  uint32_t level = 0;
    10799  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
    10800  while(node->type == Node::TYPE_SPLIT)
    10801  {
    10802  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
    10803  if(offset < nodeOffset + nextLevelSize)
    10804  {
    10805  node = node->split.leftChild;
    10806  }
    10807  else
    10808  {
    10809  node = node->split.leftChild->buddy;
    10810  nodeOffset += nextLevelSize;
    10811  }
    10812  ++level;
    10813  levelNodeSize = nextLevelSize;
    10814  }
    10815 
    10816  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
    10817  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
    10818 
    10819  ++m_FreeCount;
    10820  --m_AllocationCount;
    10821  m_SumFreeSize += alloc->GetSize();
    10822 
    10823  node->type = Node::TYPE_FREE;
    10824 
    10825  // Join free nodes if possible.
    10826  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
    10827  {
    10828  RemoveFromFreeList(level, node->buddy);
    10829  Node* const parent = node->parent;
    10830 
    10831  vma_delete(GetAllocationCallbacks(), node->buddy);
    10832  vma_delete(GetAllocationCallbacks(), node);
    10833  parent->type = Node::TYPE_FREE;
    10834 
    10835  node = parent;
    10836  --level;
    10837  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
    10838  --m_FreeCount;
    10839  }
    10840 
    10841  AddToFreeListFront(level, node);
    10842 }
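// The loop above is the classic buddy merge: whenever a node and its buddy are
// both free, both are deleted and their parent becomes the free node, repeating
// toward the root. Freeing the last allocation in a block therefore collapses
// the whole tree back to a single free root node at level 0.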
    10843 
    10844 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
    10845 {
    10846  switch(node->type)
    10847  {
    10848  case Node::TYPE_FREE:
    10849  ++outInfo.unusedRangeCount;
    10850  outInfo.unusedBytes += levelNodeSize;
    10851  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
    10852  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, levelNodeSize);
    10853  break;
    10854  case Node::TYPE_ALLOCATION:
    10855  {
    10856  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
    10857  ++outInfo.allocationCount;
    10858  outInfo.usedBytes += allocSize;
    10859  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
    10860  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, allocSize);
    10861 
    10862  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
    10863  if(unusedRangeSize > 0)
    10864  {
    10865  ++outInfo.unusedRangeCount;
    10866  outInfo.unusedBytes += unusedRangeSize;
    10867  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    10868  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    10869  }
    10870  }
    10871  break;
    10872  case Node::TYPE_SPLIT:
    10873  {
    10874  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
    10875  const Node* const leftChild = node->split.leftChild;
    10876  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
    10877  const Node* const rightChild = leftChild->buddy;
    10878  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
    10879  }
    10880  break;
    10881  default:
    10882  VMA_ASSERT(0);
    10883  }
    10884 }
    10885 
    10886 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
    10887 {
    10888  VMA_ASSERT(node->type == Node::TYPE_FREE);
    10889 
    10890  // List is empty.
    10891  Node* const frontNode = m_FreeList[level].front;
    10892  if(frontNode == VMA_NULL)
    10893  {
    10894  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
    10895  node->free.prev = node->free.next = VMA_NULL;
    10896  m_FreeList[level].front = m_FreeList[level].back = node;
    10897  }
    10898  else
    10899  {
    10900  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
    10901  node->free.prev = VMA_NULL;
    10902  node->free.next = frontNode;
    10903  frontNode->free.prev = node;
    10904  m_FreeList[level].front = node;
    10905  }
    10906 }
    10907 
    10908 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
    10909 {
    10910  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
    10911 
    10912  // It is at the front.
    10913  if(node->free.prev == VMA_NULL)
    10914  {
    10915  VMA_ASSERT(m_FreeList[level].front == node);
    10916  m_FreeList[level].front = node->free.next;
    10917  }
    10918  else
    10919  {
    10920  Node* const prevFreeNode = node->free.prev;
    10921  VMA_ASSERT(prevFreeNode->free.next == node);
    10922  prevFreeNode->free.next = node->free.next;
    10923  }
    10924 
    10925  // It is at the back.
    10926  if(node->free.next == VMA_NULL)
    10927  {
    10928  VMA_ASSERT(m_FreeList[level].back == node);
    10929  m_FreeList[level].back = node->free.prev;
    10930  }
    10931  else
    10932  {
    10933  Node* const nextFreeNode = node->free.next;
    10934  VMA_ASSERT(nextFreeNode->free.prev == node);
    10935  nextFreeNode->free.prev = node->free.prev;
    10936  }
    10937 }
    10938 
    10939 #if VMA_STATS_STRING_ENABLED
    10940 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
    10941 {
    10942  switch(node->type)
    10943  {
    10944  case Node::TYPE_FREE:
    10945  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
    10946  break;
    10947  case Node::TYPE_ALLOCATION:
    10948  {
    10949  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
    10950  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
    10951  if(allocSize < levelNodeSize)
    10952  {
    10953  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
    10954  }
    10955  }
    10956  break;
    10957  case Node::TYPE_SPLIT:
    10958  {
    10959  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
    10960  const Node* const leftChild = node->split.leftChild;
    10961  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
    10962  const Node* const rightChild = leftChild->buddy;
    10963  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
    10964  }
    10965  break;
    10966  default:
    10967  VMA_ASSERT(0);
    10968  }
    10969 }
    10970 #endif // #if VMA_STATS_STRING_ENABLED
    10971 
    10972 
    10973 ////////////////////////////////////////////////////////////////////////////////
    10974 // class VmaDeviceMemoryBlock
    10975 
    10976 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    10977  m_pMetadata(VMA_NULL),
    10978  m_MemoryTypeIndex(UINT32_MAX),
    10979  m_Id(0),
    10980  m_hMemory(VK_NULL_HANDLE),
    10981  m_MapCount(0),
    10982  m_pMappedData(VMA_NULL)
    10983 {
    10984 }
    10985 
    10986 void VmaDeviceMemoryBlock::Init(
    10987  VmaAllocator hAllocator,
    10988  uint32_t newMemoryTypeIndex,
    10989  VkDeviceMemory newMemory,
    10990  VkDeviceSize newSize,
    10991  uint32_t id,
    10992  uint32_t algorithm)
    10993 {
    10994  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    10995 
    10996  m_MemoryTypeIndex = newMemoryTypeIndex;
    10997  m_Id = id;
    10998  m_hMemory = newMemory;
    10999 
    11000  switch(algorithm)
    11001  {
    11002  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
    11003  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
    11004  break;
    11005  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
    11006  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
    11007  break;
    11008  default:
    11009  VMA_ASSERT(0);
    11010  // Fall-through.
    11011  case 0:
    11012  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
    11013  }
    11014  m_pMetadata->Init(newSize);
    11015 }
    11016 
    11017 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
    11018 {
    11019  // This is the most important assert in the entire library.
    11020  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
    11021  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
    11022 
    11023  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    11024  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
    11025  m_hMemory = VK_NULL_HANDLE;
    11026 
    11027  vma_delete(allocator, m_pMetadata);
    11028  m_pMetadata = VMA_NULL;
    11029 }
    11030 
    11031 bool VmaDeviceMemoryBlock::Validate() const
    11032 {
    11033  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
    11034  (m_pMetadata->GetSize() != 0));
    11035 
    11036  return m_pMetadata->Validate();
    11037 }
    11038 
    11039 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
    11040 {
    11041  void* pData = nullptr;
    11042  VkResult res = Map(hAllocator, 1, &pData);
    11043  if(res != VK_SUCCESS)
    11044  {
    11045  return res;
    11046  }
    11047 
    11048  res = m_pMetadata->CheckCorruption(pData);
    11049 
    11050  Unmap(hAllocator, 1);
    11051 
    11052  return res;
    11053 }
    11054 
    11055 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
    11056 {
    11057  if(count == 0)
    11058  {
    11059  return VK_SUCCESS;
    11060  }
    11061 
    11062  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    11063  if(m_MapCount != 0)
    11064  {
    11065  m_MapCount += count;
    11066  VMA_ASSERT(m_pMappedData != VMA_NULL);
    11067  if(ppData != VMA_NULL)
    11068  {
    11069  *ppData = m_pMappedData;
    11070  }
    11071  return VK_SUCCESS;
    11072  }
    11073  else
    11074  {
    11075  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
    11076  hAllocator->m_hDevice,
    11077  m_hMemory,
    11078  0, // offset
    11079  VK_WHOLE_SIZE,
    11080  0, // flags
    11081  &m_pMappedData);
    11082  if(result == VK_SUCCESS)
    11083  {
    11084  if(ppData != VMA_NULL)
    11085  {
    11086  *ppData = m_pMappedData;
    11087  }
    11088  m_MapCount = count;
    11089  }
    11090  return result;
    11091  }
    11092 }
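// Mapping is reference-counted per block: only the first Map() call performs
// the actual vkMapMemory() of the whole block; subsequent calls just increment
// m_MapCount and return the cached pointer. E.g. two persistently mapped
// allocations placed in the same block share a single vkMapMemory().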
    11093 
    11094 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
    11095 {
    11096  if(count == 0)
    11097  {
    11098  return;
    11099  }
    11100 
    11101  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    11102  if(m_MapCount >= count)
    11103  {
    11104  m_MapCount -= count;
    11105  if(m_MapCount == 0)
    11106  {
    11107  m_pMappedData = VMA_NULL;
    11108  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
    11109  }
    11110  }
    11111  else
    11112  {
    11113  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
    11114  }
    11115 }
    11116 
    11117 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
    11118 {
    11119  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    11120  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
    11121 
    11122  void* pData;
    11123  VkResult res = Map(hAllocator, 1, &pData);
    11124  if(res != VK_SUCCESS)
    11125  {
    11126  return res;
    11127  }
    11128 
    11129  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
    11130  VmaWriteMagicValue(pData, allocOffset + allocSize);
    11131 
    11132  Unmap(hAllocator, 1);
    11133 
    11134  return VK_SUCCESS;
    11135 }
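// VmaWriteMagicValue() fills a VMA_DEBUG_MARGIN-sized range with a 32-bit magic
// pattern, so the two calls above guard the bytes immediately before and after
// the allocation. ValidateMagicValueAroundAllocation() below re-checks both
// ranges when the allocation is freed, catching out-of-bounds writes.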
    11136 
    11137 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
    11138 {
    11139  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    11140  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
    11141 
    11142  void* pData;
    11143  VkResult res = Map(hAllocator, 1, &pData);
    11144  if(res != VK_SUCCESS)
    11145  {
    11146  return res;
    11147  }
    11148 
    11149  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
    11150  {
    11151  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
    11152  }
    11153  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
    11154  {
    11155  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
    11156  }
    11157 
    11158  Unmap(hAllocator, 1);
    11159 
    11160  return VK_SUCCESS;
    11161 }
    11162 
    11163 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
    11164  const VmaAllocator hAllocator,
    11165  const VmaAllocation hAllocation,
    11166  VkBuffer hBuffer)
    11167 {
    11168  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
    11169  hAllocation->GetBlock() == this);
    11170  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    11171  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    11172  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
    11173  hAllocator->m_hDevice,
    11174  hBuffer,
    11175  m_hMemory,
    11176  hAllocation->GetOffset());
    11177 }
    11178 
    11179 VkResult VmaDeviceMemoryBlock::BindImageMemory(
    11180  const VmaAllocator hAllocator,
    11181  const VmaAllocation hAllocation,
    11182  VkImage hImage)
    11183 {
    11184  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
    11185  hAllocation->GetBlock() == this);
    11186  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    11187  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    11188  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
    11189  hAllocator->m_hDevice,
    11190  hImage,
    11191  m_hMemory,
    11192  hAllocation->GetOffset());
    11193 }
    11194 
    11195 static void InitStatInfo(VmaStatInfo& outInfo)
    11196 {
    11197  memset(&outInfo, 0, sizeof(outInfo));
    11198  outInfo.allocationSizeMin = UINT64_MAX;
    11199  outInfo.unusedRangeSizeMin = UINT64_MAX;
    11200 }
    11201 
    11202 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
    11203 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
    11204 {
    11205  inoutInfo.blockCount += srcInfo.blockCount;
    11206  inoutInfo.allocationCount += srcInfo.allocationCount;
    11207  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
    11208  inoutInfo.usedBytes += srcInfo.usedBytes;
    11209  inoutInfo.unusedBytes += srcInfo.unusedBytes;
    11210  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
    11211  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
    11212  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
    11213  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
    11214 }
    11215 
    11216 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
    11217 {
    11218  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
    11219  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
    11220  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
    11221  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
    11222 }
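// Note that the min/max fields merge with VMA_MIN/VMA_MAX rather than addition,
// which is why InitStatInfo() seeds the min fields with UINT64_MAX: merging any
// real value into them then yields a correct minimum.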
    11223 
    11224 VmaPool_T::VmaPool_T(
    11225  VmaAllocator hAllocator,
    11226  const VmaPoolCreateInfo& createInfo,
    11227  VkDeviceSize preferredBlockSize) :
    11228  m_BlockVector(
    11229  hAllocator,
    11230  createInfo.memoryTypeIndex,
    11231  createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
    11232  createInfo.minBlockCount,
    11233  createInfo.maxBlockCount,
    11234  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
    11235  createInfo.frameInUseCount,
    11236  true, // isCustomPool
    11237  createInfo.blockSize != 0, // explicitBlockSize
    11238  createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
    11239  m_Id(0)
    11240 {
    11241 }
    11242 
    11243 VmaPool_T::~VmaPool_T()
    11244 {
    11245 }
    11246 
    11247 #if VMA_STATS_STRING_ENABLED
    11248 
    11249 #endif // #if VMA_STATS_STRING_ENABLED
    11250 
    11251 VmaBlockVector::VmaBlockVector(
    11252  VmaAllocator hAllocator,
    11253  uint32_t memoryTypeIndex,
    11254  VkDeviceSize preferredBlockSize,
    11255  size_t minBlockCount,
    11256  size_t maxBlockCount,
    11257  VkDeviceSize bufferImageGranularity,
    11258  uint32_t frameInUseCount,
    11259  bool isCustomPool,
    11260  bool explicitBlockSize,
    11261  uint32_t algorithm) :
    11262  m_hAllocator(hAllocator),
    11263  m_MemoryTypeIndex(memoryTypeIndex),
    11264  m_PreferredBlockSize(preferredBlockSize),
    11265  m_MinBlockCount(minBlockCount),
    11266  m_MaxBlockCount(maxBlockCount),
    11267  m_BufferImageGranularity(bufferImageGranularity),
    11268  m_FrameInUseCount(frameInUseCount),
    11269  m_IsCustomPool(isCustomPool),
    11270  m_ExplicitBlockSize(explicitBlockSize),
    11271  m_Algorithm(algorithm),
    11272  m_HasEmptyBlock(false),
    11273  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    11274  m_NextBlockId(0)
    11275 {
    11276 }
    11277 
    11278 VmaBlockVector::~VmaBlockVector()
    11279 {
    11280  for(size_t i = m_Blocks.size(); i--; )
    11281  {
    11282  m_Blocks[i]->Destroy(m_hAllocator);
    11283  vma_delete(m_hAllocator, m_Blocks[i]);
    11284  }
    11285 }
    11286 
    11287 VkResult VmaBlockVector::CreateMinBlocks()
    11288 {
    11289  for(size_t i = 0; i < m_MinBlockCount; ++i)
    11290  {
    11291  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
    11292  if(res != VK_SUCCESS)
    11293  {
    11294  return res;
    11295  }
    11296  }
    11297  return VK_SUCCESS;
    11298 }
    11299 
    11300 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
    11301 {
    11302  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
    11303 
    11304  const size_t blockCount = m_Blocks.size();
    11305 
    11306  pStats->size = 0;
    11307  pStats->unusedSize = 0;
    11308  pStats->allocationCount = 0;
    11309  pStats->unusedRangeCount = 0;
    11310  pStats->unusedRangeSizeMax = 0;
    11311  pStats->blockCount = blockCount;
    11312 
    11313  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    11314  {
    11315  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    11316  VMA_ASSERT(pBlock);
    11317  VMA_HEAVY_ASSERT(pBlock->Validate());
    11318  pBlock->m_pMetadata->AddPoolStats(*pStats);
    11319  }
    11320 }
    11321 
    11322 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
    11323 {
    11324  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    11325  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
    11326  (VMA_DEBUG_MARGIN > 0) &&
    11327  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
    11328 }
    11329 
    11330 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
    11331 
    11332 VkResult VmaBlockVector::Allocate(
    11333  VmaPool hCurrentPool,
    11334  uint32_t currentFrameIndex,
    11335  VkDeviceSize size,
    11336  VkDeviceSize alignment,
    11337  const VmaAllocationCreateInfo& createInfo,
    11338  VmaSuballocationType suballocType,
    11339  size_t allocationCount,
    11340  VmaAllocation* pAllocations)
    11341 {
    11342  size_t allocIndex;
    11343  VkResult res = VK_SUCCESS;
    11344 
    11345  {
    11346  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
    11347  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    11348  {
    11349  res = AllocatePage(
    11350  hCurrentPool,
    11351  currentFrameIndex,
    11352  size,
    11353  alignment,
    11354  createInfo,
    11355  suballocType,
    11356  pAllocations + allocIndex);
    11357  if(res != VK_SUCCESS)
    11358  {
    11359  break;
    11360  }
    11361  }
    11362  }
    11363 
    11364  if(res != VK_SUCCESS)
    11365  {
    11366  // Free all already created allocations.
    11367  while(allocIndex--)
    11368  {
    11369  Free(pAllocations[allocIndex]);
    11370  }
    11371  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
    11372  }
    11373 
    11374  return res;
    11375 }
    11376 
    11377 VkResult VmaBlockVector::AllocatePage(
    11378  VmaPool hCurrentPool,
    11379  uint32_t currentFrameIndex,
    11380  VkDeviceSize size,
    11381  VkDeviceSize alignment,
    11382  const VmaAllocationCreateInfo& createInfo,
    11383  VmaSuballocationType suballocType,
    11384  VmaAllocation* pAllocation)
    11385 {
    11386  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    11387  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
    11388  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    11389  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
    11390  const bool canCreateNewBlock =
    11391  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
    11392  (m_Blocks.size() < m_MaxBlockCount);
    11393  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
    11394 
    11395  // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer.
    11396  // Which in turn is available only when maxBlockCount = 1.
    11397  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
    11398  {
    11399  canMakeOtherLost = false;
    11400  }
    11401 
    11402  // Upper address can only be used with linear allocator and within single memory block.
    11403  if(isUpperAddress &&
    11404  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
    11405  {
    11406  return VK_ERROR_FEATURE_NOT_PRESENT;
    11407  }
    11408 
    11409  // Validate strategy.
    11410  switch(strategy)
    11411  {
    11412  case 0:
    11413  strategy = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;
    11414  break;
    11415  case VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT:
    11416  case VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT:
    11417  case VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT:
    11418  break;
    11419  default:
    11420  return VK_ERROR_FEATURE_NOT_PRESENT;
    11421  }
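// When no explicit strategy is requested, BEST_FIT is assumed as the default.
// Further down, BEST_FIT scans m_Blocks in forward order (least free space
// first, thanks to IncrementallySortBlocks()), while WORST_FIT/FIRST_FIT scan
// backward, trading packing density against search cost.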
    11422 
    11423  // Early reject: requested allocation size is larger than the maximum block size for this block vector.
    11424  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
    11425  {
    11426  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    11427  }
    11428 
    11429  /*
    11430  Under certain conditions, this whole section can be skipped for optimization, so
    11431  we move on directly to trying to allocate with canMakeOtherLost. That's the case
    11432  e.g. for custom pools with linear algorithm.
    11433  */
    11434  if(!canMakeOtherLost || canCreateNewBlock)
    11435  {
    11436  // 1. Search existing allocations. Try to allocate without making other allocations lost.
    11437  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
    11438  allocFlagsCopy &= ~VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
    11439 
    11440  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    11441  {
    11442  // Use only last block.
    11443  if(!m_Blocks.empty())
    11444  {
    11445  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
    11446  VMA_ASSERT(pCurrBlock);
    11447  VkResult res = AllocateFromBlock(
    11448  pCurrBlock,
    11449  hCurrentPool,
    11450  currentFrameIndex,
    11451  size,
    11452  alignment,
    11453  allocFlagsCopy,
    11454  createInfo.pUserData,
    11455  suballocType,
    11456  strategy,
    11457  pAllocation);
    11458  if(res == VK_SUCCESS)
    11459  {
    11460  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
    11461  return VK_SUCCESS;
    11462  }
    11463  }
    11464  }
    11465  else
    11466  {
    11467  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
    11468  {
    11469  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    11470  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    11471  {
    11472  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    11473  VMA_ASSERT(pCurrBlock);
    11474  VkResult res = AllocateFromBlock(
    11475  pCurrBlock,
    11476  hCurrentPool,
    11477  currentFrameIndex,
    11478  size,
    11479  alignment,
    11480  allocFlagsCopy,
    11481  createInfo.pUserData,
    11482  suballocType,
    11483  strategy,
    11484  pAllocation);
    11485  if(res == VK_SUCCESS)
    11486  {
    11487  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
    11488  return VK_SUCCESS;
    11489  }
    11490  }
    11491  }
    11492  else // WORST_FIT, FIRST_FIT
    11493  {
    11494  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
    11495  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    11496  {
    11497  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    11498  VMA_ASSERT(pCurrBlock);
    11499  VkResult res = AllocateFromBlock(
    11500  pCurrBlock,
    11501  hCurrentPool,
    11502  currentFrameIndex,
    11503  size,
    11504  alignment,
    11505  allocFlagsCopy,
    11506  createInfo.pUserData,
    11507  suballocType,
    11508  strategy,
    11509  pAllocation);
    11510  if(res == VK_SUCCESS)
    11511  {
    11512  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
    11513  return VK_SUCCESS;
    11514  }
    11515  }
    11516  }
    11517  }
    11518 
    11519  // 2. Try to create new block.
    11520  if(canCreateNewBlock)
    11521  {
    11522  // Calculate optimal size for new block.
    11523  VkDeviceSize newBlockSize = m_PreferredBlockSize;
    11524  uint32_t newBlockSizeShift = 0;
    11525  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
    11526 
    11527  if(!m_ExplicitBlockSize)
    11528  {
    11529  // Allocate 1/8, 1/4, 1/2 as first blocks.
    11530  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
    11531  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
    11532  {
    11533  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    11534  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
    11535  {
    11536  newBlockSize = smallerNewBlockSize;
    11537  ++newBlockSizeShift;
    11538  }
    11539  else
    11540  {
    11541  break;
    11542  }
    11543  }
    11544  }
    11545 
    11546  size_t newBlockIndex = 0;
    11547  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
    11548  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
    11549  if(!m_ExplicitBlockSize)
    11550  {
    11551  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
    11552  {
    11553  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    11554  if(smallerNewBlockSize >= size)
    11555  {
    11556  newBlockSize = smallerNewBlockSize;
    11557  ++newBlockSizeShift;
    11558  res = CreateBlock(newBlockSize, &newBlockIndex);
    11559  }
    11560  else
    11561  {
    11562  break;
    11563  }
    11564  }
    11565  }
    11566 
    11567  if(res == VK_SUCCESS)
    11568  {
    11569  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
    11570  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
    11571 
    11572  res = AllocateFromBlock(
    11573  pBlock,
    11574  hCurrentPool,
    11575  currentFrameIndex,
    11576  size,
    11577  alignment,
    11578  allocFlagsCopy,
    11579  createInfo.pUserData,
    11580  suballocType,
    11581  strategy,
    11582  pAllocation);
    11583  if(res == VK_SUCCESS)
    11584  {
    11585  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
    11586  return VK_SUCCESS;
    11587  }
    11588  else
    11589  {
    11590  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
    11591  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    11592  }
    11593  }
    11594  }
    11595  }
    11596 
    11597  // 3. Try to allocate from existing blocks, making other allocations lost if needed.
    11598  if(canMakeOtherLost)
    11599  {
    11600  uint32_t tryIndex = 0;
    11601  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
    11602  {
    11603  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
    11604  VmaAllocationRequest bestRequest = {};
    11605  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
    11606 
    11607  // 1. Search existing allocations.
    11608  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
    11609  {
    11610  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    11611  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    11612  {
    11613  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    11614  VMA_ASSERT(pCurrBlock);
    11615  VmaAllocationRequest currRequest = {};
    11616  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
    11617  currentFrameIndex,
    11618  m_FrameInUseCount,
    11619  m_BufferImageGranularity,
    11620  size,
    11621  alignment,
    11622  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
    11623  suballocType,
    11624  canMakeOtherLost,
    11625  strategy,
    11626  &currRequest))
    11627  {
    11628  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    11629  if(pBestRequestBlock == VMA_NULL ||
    11630  currRequestCost < bestRequestCost)
    11631  {
    11632  pBestRequestBlock = pCurrBlock;
    11633  bestRequest = currRequest;
    11634  bestRequestCost = currRequestCost;
    11635 
    11636  if(bestRequestCost == 0)
    11637  {
    11638  break;
    11639  }
    11640  }
    11641  }
    11642  }
    11643  }
    11644  else // WORST_FIT, FIRST_FIT
    11645  {
    11646  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
    11647  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    11648  {
    11649  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    11650  VMA_ASSERT(pCurrBlock);
    11651  VmaAllocationRequest currRequest = {};
    11652  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
    11653  currentFrameIndex,
    11654  m_FrameInUseCount,
    11655  m_BufferImageGranularity,
    11656  size,
    11657  alignment,
    11658  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
    11659  suballocType,
    11660  canMakeOtherLost,
    11661  strategy,
    11662  &currRequest))
    11663  {
    11664  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    11665  if(pBestRequestBlock == VMA_NULL ||
    11666  currRequestCost < bestRequestCost ||
    11667  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
    11668  {
    11669  pBestRequestBlock = pCurrBlock;
    11670  bestRequest = currRequest;
    11671  bestRequestCost = currRequestCost;
    11672 
    11673  if(bestRequestCost == 0 ||
    11674  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
    11675  {
    11676  break;
    11677  }
    11678  }
    11679  }
    11680  }
    11681  }
    11682 
    11683  if(pBestRequestBlock != VMA_NULL)
    11684  {
    11685  if(mapped)
    11686  {
    11687  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
    11688  if(res != VK_SUCCESS)
    11689  {
    11690  return res;
    11691  }
    11692  }
    11693 
    11694  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
    11695  currentFrameIndex,
    11696  m_FrameInUseCount,
    11697  &bestRequest))
    11698  {
    11699  // We no longer have an empty block.
    11700  if(pBestRequestBlock->m_pMetadata->IsEmpty())
    11701  {
    11702  m_HasEmptyBlock = false;
    11703  }
    11704  // Allocate from this pBlock.
    11705  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
    11706  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, isUpperAddress, *pAllocation);
    11707  (*pAllocation)->InitBlockAllocation(
    11708  hCurrentPool,
    11709  pBestRequestBlock,
    11710  bestRequest.offset,
    11711  alignment,
    11712  size,
    11713  suballocType,
    11714  mapped,
    11715  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    11716  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
    11717  VMA_DEBUG_LOG(" Returned from existing block");
    11718  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
    11719  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    11720  {
    11721  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    11722  }
    11723  if(IsCorruptionDetectionEnabled())
    11724  {
    11725  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
    11726  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
    11727  }
    11728  return VK_SUCCESS;
    11729  }
    11730  // else: Some allocations must have been touched while we are here. Next try.
    11731  }
    11732  else
    11733  {
    11734  // Could not find place in any of the blocks - break outer loop.
    11735  break;
    11736  }
    11737  }
    11738  /* Maximum number of tries exceeded - a very unlikely event that can happen when
    11739  many other threads are simultaneously touching the allocations, making it
    11740  impossible to make them lost at the same time as we try to allocate. */
    11741  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
    11742  {
    11743  return VK_ERROR_TOO_MANY_OBJECTS;
    11744  }
    11745  }
    11746 
    11747  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    11748 }
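// AllocatePage() in summary: (1) try existing blocks, (2) try to create a new
// block, halving the preferred size up to NEW_BLOCK_SIZE_SHIFT_MAX times on
// failure, (3) as a last resort, retry up to VMA_ALLOCATION_TRY_COUNT times
// while making other allocations lost. Only then does it report
// VK_ERROR_OUT_OF_DEVICE_MEMORY.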
    11749 
    11750 void VmaBlockVector::Free(
    11751  VmaAllocation hAllocation)
    11752 {
    11753  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
    11754 
    11755  // Scope for lock.
    11756  {
    11757  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
    11758 
    11759  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    11760 
    11761  if(IsCorruptionDetectionEnabled())
    11762  {
    11763  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
    11764  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
    11765  }
    11766 
    11767  if(hAllocation->IsPersistentMap())
    11768  {
    11769  pBlock->Unmap(m_hAllocator, 1);
    11770  }
    11771 
    11772  pBlock->m_pMetadata->Free(hAllocation);
    11773  VMA_HEAVY_ASSERT(pBlock->Validate());
    11774 
    11775  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
    11776 
    11777  // pBlock became empty after this deallocation.
    11778  if(pBlock->m_pMetadata->IsEmpty())
    11779  {
    11780  // We already have an empty block - we don't want two, so delete this one.
    11781  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
    11782  {
    11783  pBlockToDelete = pBlock;
    11784  Remove(pBlock);
    11785  }
    11786  // We now have our first empty block.
    11787  else
    11788  {
    11789  m_HasEmptyBlock = true;
    11790  }
    11791  }
    11792  // pBlock didn't become empty, but we have another empty block - find and free that one.
    11793  // (This is optional, heuristics.)
    11794  else if(m_HasEmptyBlock)
    11795  {
    11796  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
    11797  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
    11798  {
    11799  pBlockToDelete = pLastBlock;
    11800  m_Blocks.pop_back();
    11801  m_HasEmptyBlock = false;
    11802  }
    11803  }
    11804 
    11805  IncrementallySortBlocks();
    11806  }
    11807 
    11808  // Destruction of a free block. Deferred until this point, outside of the
    11809  // mutex lock, for performance reasons.
    11810  if(pBlockToDelete != VMA_NULL)
    11811  {
    11812  VMA_DEBUG_LOG(" Deleted empty block");
    11813  pBlockToDelete->Destroy(m_hAllocator);
    11814  vma_delete(m_hAllocator, pBlockToDelete);
    11815  }
    11816 }
    11817 
    11818 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
    11819 {
    11820  VkDeviceSize result = 0;
    11821  for(size_t i = m_Blocks.size(); i--; )
    11822  {
    11823  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
    11824  if(result >= m_PreferredBlockSize)
    11825  {
    11826  break;
    11827  }
    11828  }
    11829  return result;
    11830 }
    11831 
    11832 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
    11833 {
    11834  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    11835  {
    11836  if(m_Blocks[blockIndex] == pBlock)
    11837  {
    11838  VmaVectorRemove(m_Blocks, blockIndex);
    11839  return;
    11840  }
    11841  }
    11842  VMA_ASSERT(0);
    11843 }
    11844 
    11845 void VmaBlockVector::IncrementallySortBlocks()
    11846 {
    11847  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    11848  {
    11849  // Bubble sort only until first swap.
    11850  for(size_t i = 1; i < m_Blocks.size(); ++i)
    11851  {
    11852  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
    11853  {
    11854  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
    11855  return;
    11856  }
    11857  }
    11858  }
    11859 }
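// A single bubble-sort pass that stops at the first swap keeps each call cheap
// while m_Blocks converges toward ascending order of free space - the order the
// BEST_FIT search in AllocatePage() relies on. It is skipped for the linear
// algorithm, where "the last block" has special meaning.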
    11860 
    11861 VkResult VmaBlockVector::AllocateFromBlock(
    11862  VmaDeviceMemoryBlock* pBlock,
    11863  VmaPool hCurrentPool,
    11864  uint32_t currentFrameIndex,
    11865  VkDeviceSize size,
    11866  VkDeviceSize alignment,
    11867  VmaAllocationCreateFlags allocFlags,
    11868  void* pUserData,
    11869  VmaSuballocationType suballocType,
    11870  uint32_t strategy,
    11871  VmaAllocation* pAllocation)
    11872 {
    11873  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
    11874  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    11875  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    11876  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
    11877 
    11878  VmaAllocationRequest currRequest = {};
    11879  if(pBlock->m_pMetadata->CreateAllocationRequest(
    11880  currentFrameIndex,
    11881  m_FrameInUseCount,
    11882  m_BufferImageGranularity,
    11883  size,
    11884  alignment,
    11885  isUpperAddress,
    11886  suballocType,
    11887  false, // canMakeOtherLost
    11888  strategy,
    11889  &currRequest))
    11890  {
    11891  // Allocate from pBlock.
    11892  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
    11893 
    11894  if(mapped)
    11895  {
    11896  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
    11897  if(res != VK_SUCCESS)
    11898  {
    11899  return res;
    11900  }
    11901  }
    11902 
    11903  // We no longer have an empty block.
    11904  if(pBlock->m_pMetadata->IsEmpty())
    11905  {
    11906  m_HasEmptyBlock = false;
    11907  }
    11908 
    11909  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
    11910  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, isUpperAddress, *pAllocation);
    11911  (*pAllocation)->InitBlockAllocation(
    11912  hCurrentPool,
    11913  pBlock,
    11914  currRequest.offset,
    11915  alignment,
    11916  size,
    11917  suballocType,
    11918  mapped,
    11919  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    11920  VMA_HEAVY_ASSERT(pBlock->Validate());
    11921  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
    11922  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    11923  {
    11924  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    11925  }
    11926  if(IsCorruptionDetectionEnabled())
    11927  {
    11928  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
    11929  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
    11930  }
    11931  return VK_SUCCESS;
    11932  }
    11933  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    11934 }
    11935 
    11936 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
    11937 {
    11938  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    11939  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
    11940  allocInfo.allocationSize = blockSize;
    11941  VkDeviceMemory mem = VK_NULL_HANDLE;
    11942  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
    11943  if(res < 0)
    11944  {
    11945  return res;
    11946  }
    11947 
    11948  // New VkDeviceMemory successfully created.
    11949 
    11950  // Create new block object for it.
    11951  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
    11952  pBlock->Init(
    11953  m_hAllocator,
    11954  m_MemoryTypeIndex,
    11955  mem,
    11956  allocInfo.allocationSize,
    11957  m_NextBlockId++,
    11958  m_Algorithm);
    11959 
    11960  m_Blocks.push_back(pBlock);
    11961  if(pNewBlockIndex != VMA_NULL)
    11962  {
    11963  *pNewBlockIndex = m_Blocks.size() - 1;
    11964  }
    11965 
    11966  return VK_SUCCESS;
    11967 }
    11968 
    11969 void VmaBlockVector::ApplyDefragmentationMovesCpu(
    11970  class VmaBlockVectorDefragmentationContext* pDefragCtx,
    11971  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves)
    11972 {
    11973  const size_t blockCount = m_Blocks.size();
    11974  const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex);
    11975 
    11976  enum BLOCK_FLAG
    11977  {
    11978  BLOCK_FLAG_USED = 0x00000001,
    11979  BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002,
    11980  };
    11981 
    11982  struct BlockInfo
    11983  {
    11984  uint32_t flags;
    11985  void* pMappedData;
    11986  };
    11987  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> >
    11988  blockInfo(blockCount, VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks()));
    11989  memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo));
    11990 
    11991  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
    11992  const size_t moveCount = moves.size();
    11993  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
    11994  {
    11995  const VmaDefragmentationMove& move = moves[moveIndex];
    11996  blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED;
    11997  blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED;
    11998  }
    11999 
    12000  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
    12001 
    12002  // Go over all blocks. Get mapped pointer or map if necessary.
    12003  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
    12004  {
    12005  BlockInfo& currBlockInfo = blockInfo[blockIndex];
    12006  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
    12007  if((currBlockInfo.flags & BLOCK_FLAG_USED) != 0)
    12008  {
    12009  currBlockInfo.pMappedData = pBlock->GetMappedData();
    12010  // It is not originally mapped - map it.
    12011  if(currBlockInfo.pMappedData == VMA_NULL)
    12012  {
    12013  pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData);
    12014  if(pDefragCtx->res == VK_SUCCESS)
    12015  {
    12016  currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION;
    12017  }
    12018  }
    12019  }
    12020  }
    12021 
    12022  // Go over all moves. Do actual data transfer.
    12023  if(pDefragCtx->res == VK_SUCCESS)
    12024  {
    12025  const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
    12026  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
    12027 
    12028  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
    12029  {
    12030  const VmaDefragmentationMove& move = moves[moveIndex];
    12031 
    12032  const BlockInfo& srcBlockInfo = blockInfo[move.srcBlockIndex];
    12033  const BlockInfo& dstBlockInfo = blockInfo[move.dstBlockIndex];
    12034 
    12035  VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData);
    12036 
    12037  // Invalidate source.
    12038  if(isNonCoherent)
    12039  {
    12040  VmaDeviceMemoryBlock* const pSrcBlock = m_Blocks[move.srcBlockIndex];
    12041  memRange.memory = pSrcBlock->GetDeviceMemory();
    12042  memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize);
    12043  memRange.size = VMA_MIN(
    12044  VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize),
    12045  pSrcBlock->m_pMetadata->GetSize() - memRange.offset);
    12046  (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
    12047  }
    12048 
    12049  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
    12050  memmove(
    12051  reinterpret_cast<char*>(dstBlockInfo.pMappedData) + move.dstOffset,
    12052  reinterpret_cast<char*>(srcBlockInfo.pMappedData) + move.srcOffset,
    12053  static_cast<size_t>(move.size));
    12054 
    12055  if(IsCorruptionDetectionEnabled())
    12056  {
    12057  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset - VMA_DEBUG_MARGIN);
    12058  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size);
    12059  }
    12060 
    12061  // Flush destination.
    12062  if(isNonCoherent)
    12063  {
    12064  VmaDeviceMemoryBlock* const pDstBlock = m_Blocks[move.dstBlockIndex];
    12065  memRange.memory = pDstBlock->GetDeviceMemory();
    12066  memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize);
    12067  memRange.size = VMA_MIN(
    12068  VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize),
    12069  pDstBlock->m_pMetadata->GetSize() - memRange.offset);
    12070  (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
    12071  }
    12072  }
    12073  }
    12074 
// Go over all blocks in reverse order. Unmap those that were mapped just for defragmentation.
// This is done regardless of whether pDefragCtx->res is VK_SUCCESS.
    12077  for(size_t blockIndex = blockCount; blockIndex--; )
    12078  {
    12079  const BlockInfo& currBlockInfo = blockInfo[blockIndex];
    12080  if((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0)
    12081  {
    12082  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
    12083  pBlock->Unmap(m_hAllocator, 1);
    12084  }
    12085  }
    12086 }
    12087 
    12088 void VmaBlockVector::ApplyDefragmentationMovesGpu(
    12089  class VmaBlockVectorDefragmentationContext* pDefragCtx,
    12090  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    12091  VkCommandBuffer commandBuffer)
    12092 {
    12093  const size_t blockCount = m_Blocks.size();
    12094 
    12095  pDefragCtx->blockContexts.resize(blockCount);
    12096  memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext));
    12097 
    12098  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
    12099  const size_t moveCount = moves.size();
    12100  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
    12101  {
    12102  const VmaDefragmentationMove& move = moves[moveIndex];
    12103  pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
    12104  pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
    12105  }
    12106 
    12107  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
    12108 
    12109  // Go over all blocks. Create and bind buffer for whole block if necessary.
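// vkCmdCopyBuffer operates on buffers, not raw VkDeviceMemory, so every block
// that takes part in any move gets a temporary buffer spanning the whole block,
// bound at offset 0 and created with TRANSFER_SRC | TRANSFER_DST usage.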
    12110  {
    12111  VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    12112  bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT |
    12113  VK_BUFFER_USAGE_TRANSFER_DST_BIT;
    12114 
    12115  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
    12116  {
    12117  VmaBlockDefragmentationContext& currBlockCtx = pDefragCtx->blockContexts[blockIndex];
    12118  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
    12119  if((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0)
    12120  {
    12121  bufCreateInfo.size = pBlock->m_pMetadata->GetSize();
    12122  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)(
    12123  m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer);
    12124  if(pDefragCtx->res == VK_SUCCESS)
    12125  {
    12126  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)(
    12127  m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0);
    12128  }
    12129  }
    12130  }
    12131  }
    12132 
    12133  // Go over all moves. Post data transfer commands to command buffer.
    12134  if(pDefragCtx->res == VK_SUCCESS)
    12135  {
    12136  const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
    12137  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
    12138 
    12139  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
    12140  {
    12141  const VmaDefragmentationMove& move = moves[moveIndex];
    12142 
    12143  const VmaBlockDefragmentationContext& srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex];
    12144  const VmaBlockDefragmentationContext& dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex];
    12145 
    12146  VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer);
    12147 
    12148  VkBufferCopy region = {
    12149  move.srcOffset,
    12150  move.dstOffset,
    12151  move.size };
    12152  (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)(
    12153  commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, &region);
    12154  }
    12155  }
    12156 
// The temporary buffers stay in pDefragCtx->blockContexts; they are destroyed later, in DefragmentationEnd().
    12158  if(pDefragCtx->res == VK_SUCCESS && moveCount > 0)
    12159  {
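// VK_NOT_READY signals to the caller that the recorded command buffer must be
// submitted and finish executing before DefragmentationEnd() destroys the
// temporary buffers.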
    12160  pDefragCtx->res = VK_NOT_READY;
    12161  }
    12162 }
    12163 
    12164 void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats)
    12165 {
    12166  m_HasEmptyBlock = false;
    12167  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    12168  {
    12169  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
    12170  if(pBlock->m_pMetadata->IsEmpty())
    12171  {
    12172  if(m_Blocks.size() > m_MinBlockCount)
    12173  {
    12174  if(pDefragmentationStats != VMA_NULL)
    12175  {
    12176  ++pDefragmentationStats->deviceMemoryBlocksFreed;
    12177  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
    12178  }
    12179 
    12180  VmaVectorRemove(m_Blocks, blockIndex);
    12181  pBlock->Destroy(m_hAllocator);
    12182  vma_delete(m_hAllocator, pBlock);
    12183  }
    12184  else
    12185  {
    12186  m_HasEmptyBlock = true;
    12187  }
    12188  }
    12189  }
    12190 }
    12191 
    12192 #if VMA_STATS_STRING_ENABLED
    12193 
    12194 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
    12195 {
    12196  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
    12197 
    12198  json.BeginObject();
    12199 
    12200  if(m_IsCustomPool)
    12201  {
    12202  json.WriteString("MemoryTypeIndex");
    12203  json.WriteNumber(m_MemoryTypeIndex);
    12204 
    12205  json.WriteString("BlockSize");
    12206  json.WriteNumber(m_PreferredBlockSize);
    12207 
    12208  json.WriteString("BlockCount");
    12209  json.BeginObject(true);
    12210  if(m_MinBlockCount > 0)
    12211  {
    12212  json.WriteString("Min");
    12213  json.WriteNumber((uint64_t)m_MinBlockCount);
    12214  }
    12215  if(m_MaxBlockCount < SIZE_MAX)
    12216  {
    12217  json.WriteString("Max");
    12218  json.WriteNumber((uint64_t)m_MaxBlockCount);
    12219  }
    12220  json.WriteString("Cur");
    12221  json.WriteNumber((uint64_t)m_Blocks.size());
    12222  json.EndObject();
    12223 
    12224  if(m_FrameInUseCount > 0)
    12225  {
    12226  json.WriteString("FrameInUseCount");
    12227  json.WriteNumber(m_FrameInUseCount);
    12228  }
    12229 
    12230  if(m_Algorithm != 0)
    12231  {
    12232  json.WriteString("Algorithm");
    12233  json.WriteString(VmaAlgorithmToStr(m_Algorithm));
    12234  }
    12235  }
    12236  else
    12237  {
    12238  json.WriteString("PreferredBlockSize");
    12239  json.WriteNumber(m_PreferredBlockSize);
    12240  }
    12241 
    12242  json.WriteString("Blocks");
    12243  json.BeginObject();
    12244  for(size_t i = 0; i < m_Blocks.size(); ++i)
    12245  {
    12246  json.BeginString();
    12247  json.ContinueString(m_Blocks[i]->GetId());
    12248  json.EndString();
    12249 
    12250  m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
    12251  }
    12252  json.EndObject();
    12253 
    12254  json.EndObject();
    12255 }
    12256 
    12257 #endif // #if VMA_STATS_STRING_ENABLED
    12258 
    12259 void VmaBlockVector::Defragment(
    12260  class VmaBlockVectorDefragmentationContext* pCtx,
    12261  VmaDefragmentationStats* pStats,
    12262  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
    12263  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
    12264  VkCommandBuffer commandBuffer)
    12265 {
    12266  pCtx->res = VK_SUCCESS;
    12267 
    12268  const VkMemoryPropertyFlags memPropFlags =
    12269  m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags;
    12270  const bool isHostVisible = (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
    12271  const bool isHostCoherent = (memPropFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0;
    12272 
    12273  const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 &&
    12274  isHostVisible;
    12275  const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0 &&
    12276  (VMA_DEBUG_DETECT_CORRUPTION == 0 || !(isHostVisible && isHostCoherent));
    12277 
    12278  // There are options to defragment this memory type.
    12279  if(canDefragmentOnCpu || canDefragmentOnGpu)
    12280  {
    12281  bool defragmentOnGpu;
    12282  // There is only one option to defragment this memory type.
    12283  if(canDefragmentOnGpu != canDefragmentOnCpu)
    12284  {
    12285  defragmentOnGpu = canDefragmentOnGpu;
    12286  }
// Both options are available: use a heuristic to choose the better one.
    12288  else
    12289  {
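// Heuristic: prefer the GPU path for DEVICE_LOCAL memory, which is typically
// slow to access through a mapped pointer, and on integrated GPUs, where all
// memory is device-local anyway.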
    12290  defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 ||
    12291  m_hAllocator->IsIntegratedGpu();
    12292  }
    12293 
    12294  bool overlappingMoveSupported = !defragmentOnGpu;
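// The CPU path copies with memmove(), so overlapping source and destination
// regions within one block are allowed; vkCmdCopyBuffer forbids overlapping
// copy regions, hence the GPU path must not produce them.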
    12295 
    12296  if(m_hAllocator->m_UseMutex)
    12297  {
    12298  m_Mutex.LockWrite();
    12299  pCtx->mutexLocked = true;
    12300  }
    12301 
    12302  pCtx->Begin(overlappingMoveSupported);
    12303 
    12304  // Defragment.
    12305 
    12306  const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove;
    12307  const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove;
VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > moves(
VmaStlAllocator<VmaDefragmentationMove>(m_hAllocator->GetAllocationCallbacks()));
    12310  pCtx->res = pCtx->GetAlgorithm()->Defragment(moves, maxBytesToMove, maxAllocationsToMove);
    12311 
    12312  // Accumulate statistics.
    12313  if(pStats != VMA_NULL)
    12314  {
    12315  const VkDeviceSize bytesMoved = pCtx->GetAlgorithm()->GetBytesMoved();
    12316  const uint32_t allocationsMoved = pCtx->GetAlgorithm()->GetAllocationsMoved();
    12317  pStats->bytesMoved += bytesMoved;
    12318  pStats->allocationsMoved += allocationsMoved;
    12319  VMA_ASSERT(bytesMoved <= maxBytesToMove);
    12320  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
    12321  if(defragmentOnGpu)
    12322  {
    12323  maxGpuBytesToMove -= bytesMoved;
    12324  maxGpuAllocationsToMove -= allocationsMoved;
    12325  }
    12326  else
    12327  {
    12328  maxCpuBytesToMove -= bytesMoved;
    12329  maxCpuAllocationsToMove -= allocationsMoved;
    12330  }
    12331  }
    12332 
    12333  if(pCtx->res >= VK_SUCCESS)
    12334  {
    12335  if(defragmentOnGpu)
    12336  {
    12337  ApplyDefragmentationMovesGpu(pCtx, moves, commandBuffer);
    12338  }
    12339  else
    12340  {
    12341  ApplyDefragmentationMovesCpu(pCtx, moves);
    12342  }
    12343  }
    12344  }
    12345 }
    12346 
    12347 void VmaBlockVector::DefragmentationEnd(
    12348  class VmaBlockVectorDefragmentationContext* pCtx,
    12349  VmaDefragmentationStats* pStats)
    12350 {
    12351  // Destroy buffers.
    12352  for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--; )
    12353  {
    12354  VmaBlockDefragmentationContext& blockCtx = pCtx->blockContexts[blockIndex];
    12355  if(blockCtx.hBuffer)
    12356  {
    12357  (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(
    12358  m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
    12359  }
    12360  }
    12361 
    12362  if(pCtx->res >= VK_SUCCESS)
    12363  {
    12364  FreeEmptyBlocks(pStats);
    12365  }
    12366 
    12367  if(pCtx->mutexLocked)
    12368  {
    12369  VMA_ASSERT(m_hAllocator->m_UseMutex);
    12370  m_Mutex.UnlockWrite();
    12371  }
    12372 }
    12373 
    12374 size_t VmaBlockVector::CalcAllocationCount() const
    12375 {
    12376  size_t result = 0;
    12377  for(size_t i = 0; i < m_Blocks.size(); ++i)
    12378  {
    12379  result += m_Blocks[i]->m_pMetadata->GetAllocationCount();
    12380  }
    12381  return result;
    12382 }
    12383 
    12384 bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const
    12385 {
    12386  if(m_BufferImageGranularity == 1)
    12387  {
    12388  return false;
    12389  }
    12390  VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE;
    12391  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
    12392  {
    12393  VmaDeviceMemoryBlock* const pBlock = m_Blocks[i];
    12394  VMA_ASSERT(m_Algorithm == 0);
    12395  VmaBlockMetadata_Generic* const pMetadata = (VmaBlockMetadata_Generic*)pBlock->m_pMetadata;
    12396  if(pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType))
    12397  {
    12398  return true;
    12399  }
    12400  }
    12401  return false;
    12402 }
    12403 
    12404 void VmaBlockVector::MakePoolAllocationsLost(
    12405  uint32_t currentFrameIndex,
    12406  size_t* pLostAllocationCount)
    12407 {
    12408  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
    12409  size_t lostAllocationCount = 0;
    12410  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    12411  {
    12412  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    12413  VMA_ASSERT(pBlock);
    12414  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
    12415  }
    12416  if(pLostAllocationCount != VMA_NULL)
    12417  {
    12418  *pLostAllocationCount = lostAllocationCount;
    12419  }
    12420 }
    12421 
    12422 VkResult VmaBlockVector::CheckCorruption()
    12423 {
    12424  if(!IsCorruptionDetectionEnabled())
    12425  {
    12426  return VK_ERROR_FEATURE_NOT_PRESENT;
    12427  }
    12428 
    12429  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
    12430  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    12431  {
    12432  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    12433  VMA_ASSERT(pBlock);
    12434  VkResult res = pBlock->CheckCorruption(m_hAllocator);
    12435  if(res != VK_SUCCESS)
    12436  {
    12437  return res;
    12438  }
    12439  }
    12440  return VK_SUCCESS;
    12441 }
    12442 
    12443 void VmaBlockVector::AddStats(VmaStats* pStats)
    12444 {
    12445  const uint32_t memTypeIndex = m_MemoryTypeIndex;
    12446  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
    12447 
    12448  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
    12449 
    12450  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    12451  {
    12452  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    12453  VMA_ASSERT(pBlock);
    12454  VMA_HEAVY_ASSERT(pBlock->Validate());
    12455  VmaStatInfo allocationStatInfo;
    12456  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
    12457  VmaAddStatInfo(pStats->total, allocationStatInfo);
    12458  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    12459  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    12460  }
    12461 }
    12462 
////////////////////////////////////////////////////////////////////////////////
// VmaDefragmentationAlgorithm_Generic members definition
    12465 
    12466 VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic(
    12467  VmaAllocator hAllocator,
    12468  VmaBlockVector* pBlockVector,
    12469  uint32_t currentFrameIndex,
    12470  bool overlappingMoveSupported) :
    12471  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
    12472  m_AllAllocations(false),
    12473  m_AllocationCount(0),
    12474  m_BytesMoved(0),
    12475  m_AllocationsMoved(0),
    12476  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
    12477 {
    12478  // Create block info for each block.
    12479  const size_t blockCount = m_pBlockVector->m_Blocks.size();
    12480  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    12481  {
    12482  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
    12483  pBlockInfo->m_OriginalBlockIndex = blockIndex;
    12484  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
    12485  m_Blocks.push_back(pBlockInfo);
    12486  }
    12487 
    12488  // Sort them by m_pBlock pointer value.
    12489  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
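// Keeping the vector sorted by pointer value lets AddAllocation() find the
// BlockInfo for a given block with binary search (VmaBinaryFindFirstNotLess).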
    12490 }
    12491 
    12492 VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic()
    12493 {
    12494  for(size_t i = m_Blocks.size(); i--; )
    12495  {
    12496  vma_delete(m_hAllocator, m_Blocks[i]);
    12497  }
    12498 }
    12499 
    12500 void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
    12501 {
// Now that we are inside VmaBlockVector::m_Mutex, we can make the final check whether this allocation was not lost.
    12503  if(hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
    12504  {
    12505  VmaDeviceMemoryBlock* pBlock = hAlloc->GetBlock();
    12506  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
    12507  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
    12508  {
    12509  AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged);
    12510  (*it)->m_Allocations.push_back(allocInfo);
    12511  }
    12512  else
    12513  {
    12514  VMA_ASSERT(0);
    12515  }
    12516 
    12517  ++m_AllocationCount;
    12518  }
    12519 }
    12520 
    12521 VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
    12522  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    12523  VkDeviceSize maxBytesToMove,
    12524  uint32_t maxAllocationsToMove)
    12525 {
    12526  if(m_Blocks.empty())
    12527  {
    12528  return VK_SUCCESS;
    12529  }
    12530 
// This choice is based on empirical testing; the alternatives below can be tried instead.
    12532  // Option 1:
    12533  uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
    12534  // Option 2:
    12535  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT;
    12536  // Option 3:
    12537  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT;
    12538 
    12539  size_t srcBlockMinIndex = 0;
// When using FAST_ALGORITHM, move allocations only out of the last of those blocks that contain non-movable allocations.
    12541  /*
    12542  if(m_AlgorithmFlags & VMA_DEFRAGMENTATION_FAST_ALGORITHM_BIT)
    12543  {
    12544  const size_t blocksWithNonMovableCount = CalcBlocksWithNonMovableCount();
    12545  if(blocksWithNonMovableCount > 0)
    12546  {
    12547  srcBlockMinIndex = blocksWithNonMovableCount - 1;
    12548  }
    12549  }
    12550  */
    12551 
    12552  size_t srcBlockIndex = m_Blocks.size() - 1;
    12553  size_t srcAllocIndex = SIZE_MAX;
    12554  for(;;)
    12555  {
    12556  // 1. Find next allocation to move.
    12557  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
    12558  // 1.2. Then start from last to first m_Allocations.
    12559  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
    12560  {
    12561  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
    12562  {
    12563  // Finished: no more allocations to process.
    12564  if(srcBlockIndex == srcBlockMinIndex)
    12565  {
    12566  return VK_SUCCESS;
    12567  }
    12568  else
    12569  {
    12570  --srcBlockIndex;
    12571  srcAllocIndex = SIZE_MAX;
    12572  }
    12573  }
    12574  else
    12575  {
    12576  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
    12577  }
    12578  }
    12579 
    12580  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
    12581  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
    12582 
    12583  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
    12584  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
    12585  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
    12586  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
    12587 
    12588  // 2. Try to find new place for this allocation in preceding or current block.
    12589  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
    12590  {
    12591  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
    12592  VmaAllocationRequest dstAllocRequest;
    12593  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
    12594  m_CurrentFrameIndex,
    12595  m_pBlockVector->GetFrameInUseCount(),
    12596  m_pBlockVector->GetBufferImageGranularity(),
    12597  size,
    12598  alignment,
    12599  false, // upperAddress
    12600  suballocType,
    12601  false, // canMakeOtherLost
    12602  strategy,
    12603  &dstAllocRequest) &&
    12604  MoveMakesSense(
    12605  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
    12606  {
    12607  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
    12608 
    12609  // Reached limit on number of allocations or bytes to move.
    12610  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
    12611  (m_BytesMoved + size > maxBytesToMove))
    12612  {
    12613  return VK_SUCCESS;
    12614  }
    12615 
    12616  VmaDefragmentationMove move;
    12617  move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex;
    12618  move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex;
    12619  move.srcOffset = srcOffset;
    12620  move.dstOffset = dstAllocRequest.offset;
    12621  move.size = size;
    12622  moves.push_back(move);
    12623 
    12624  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
    12625  dstAllocRequest,
    12626  suballocType,
    12627  size,
    12628  false, // upperAddress
    12629  allocInfo.m_hAllocation);
    12630  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
    12631 
    12632  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
    12633 
    12634  if(allocInfo.m_pChanged != VMA_NULL)
    12635  {
    12636  *allocInfo.m_pChanged = VK_TRUE;
    12637  }
    12638 
    12639  ++m_AllocationsMoved;
    12640  m_BytesMoved += size;
    12641 
    12642  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
    12643 
    12644  break;
    12645  }
    12646  }
    12647 
// If not processed, this allocInfo remains in pSrcBlockInfo->m_Allocations for the next round.
    12649 
    12650  if(srcAllocIndex > 0)
    12651  {
    12652  --srcAllocIndex;
    12653  }
    12654  else
    12655  {
    12656  if(srcBlockIndex > 0)
    12657  {
    12658  --srcBlockIndex;
    12659  srcAllocIndex = SIZE_MAX;
    12660  }
    12661  else
    12662  {
    12663  return VK_SUCCESS;
    12664  }
    12665  }
    12666  }
    12667 }
    12668 
    12669 size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const
    12670 {
    12671  size_t result = 0;
    12672  for(size_t i = 0; i < m_Blocks.size(); ++i)
    12673  {
    12674  if(m_Blocks[i]->m_HasNonMovableAllocations)
    12675  {
    12676  ++result;
    12677  }
    12678  }
    12679  return result;
    12680 }
    12681 
    12682 VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
    12683  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    12684  VkDeviceSize maxBytesToMove,
    12685  uint32_t maxAllocationsToMove)
    12686 {
    12687  if(!m_AllAllocations && m_AllocationCount == 0)
    12688  {
    12689  return VK_SUCCESS;
    12690  }
    12691 
    12692  const size_t blockCount = m_Blocks.size();
    12693  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    12694  {
    12695  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
    12696 
    12697  if(m_AllAllocations)
    12698  {
    12699  VmaBlockMetadata_Generic* pMetadata = (VmaBlockMetadata_Generic*)pBlockInfo->m_pBlock->m_pMetadata;
    12700  for(VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin();
    12701  it != pMetadata->m_Suballocations.end();
    12702  ++it)
    12703  {
    12704  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
    12705  {
    12706  AllocationInfo allocInfo = AllocationInfo(it->hAllocation, VMA_NULL);
    12707  pBlockInfo->m_Allocations.push_back(allocInfo);
    12708  }
    12709  }
    12710  }
    12711 
    12712  pBlockInfo->CalcHasNonMovableAllocations();
    12713 
// This choice is based on empirical testing; the alternative below can be tried instead.
    12715  // Option 1:
    12716  pBlockInfo->SortAllocationsByOffsetDescending();
    12717  // Option 2:
    12718  //pBlockInfo->SortAllocationsBySizeDescending();
    12719  }
    12720 
// Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
    12722  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
    12723 
// The round count below was chosen empirically.
    12725  const uint32_t roundCount = 2;
    12726 
    12727  // Execute defragmentation rounds (the main part).
    12728  VkResult result = VK_SUCCESS;
    12729  for(uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round)
    12730  {
    12731  result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove);
    12732  }
    12733 
    12734  return result;
    12735 }
    12736 
    12737 bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense(
    12738  size_t dstBlockIndex, VkDeviceSize dstOffset,
    12739  size_t srcBlockIndex, VkDeviceSize srcOffset)
    12740 {
    12741  if(dstBlockIndex < srcBlockIndex)
    12742  {
    12743  return true;
    12744  }
    12745  if(dstBlockIndex > srcBlockIndex)
    12746  {
    12747  return false;
    12748  }
    12749  if(dstOffset < srcOffset)
    12750  {
    12751  return true;
    12752  }
    12753  return false;
    12754 }
    12755 
////////////////////////////////////////////////////////////////////////////////
// VmaDefragmentationAlgorithm_Fast
    12758 
    12759 VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast(
    12760  VmaAllocator hAllocator,
    12761  VmaBlockVector* pBlockVector,
    12762  uint32_t currentFrameIndex,
    12763  bool overlappingMoveSupported) :
    12764  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
    12765  m_OverlappingMoveSupported(overlappingMoveSupported),
    12766  m_AllocationCount(0),
    12767  m_AllAllocations(false),
    12768  m_BytesMoved(0),
    12769  m_AllocationsMoved(0),
    12770  m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks()))
    12771 {
    12772  VMA_ASSERT(VMA_DEBUG_MARGIN == 0);
    12773 
    12774 }
    12775 
    12776 VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast()
    12777 {
    12778 }
    12779 
    12780 VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
    12781  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    12782  VkDeviceSize maxBytesToMove,
    12783  uint32_t maxAllocationsToMove)
    12784 {
    12785  VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount);
    12786 
    12787  const size_t blockCount = m_pBlockVector->GetBlockCount();
    12788  if(blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0)
    12789  {
    12790  return VK_SUCCESS;
    12791  }
    12792 
    12793  PreprocessMetadata();
    12794 
// Sort blocks in order from most "destination" to most "source".
    12796 
    12797  m_BlockInfos.resize(blockCount);
    12798  for(size_t i = 0; i < blockCount; ++i)
    12799  {
    12800  m_BlockInfos[i].origBlockIndex = i;
    12801  }
    12802 
    12803  VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo& lhs, const BlockInfo& rhs) -> bool {
    12804  return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() <
    12805  m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize();
    12806  });
    12807 
    12808  // THE MAIN ALGORITHM
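// Walk the suballocations of each source block in offset order and compact
// them front-to-back into destination blocks. Free ranges that have to be
// skipped over are remembered in freeSpaceDb and reused for later
// allocations that happen to fit there.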
    12809 
    12810  FreeSpaceDatabase freeSpaceDb;
    12811 
    12812  size_t dstBlockInfoIndex = 0;
    12813  size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
    12814  VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
    12815  VmaBlockMetadata_Generic* pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
    12816  VkDeviceSize dstBlockSize = pDstMetadata->GetSize();
    12817  VkDeviceSize dstOffset = 0;
    12818 
    12819  bool end = false;
    12820  for(size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex)
    12821  {
    12822  const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex;
    12823  VmaDeviceMemoryBlock* const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex);
    12824  VmaBlockMetadata_Generic* const pSrcMetadata = (VmaBlockMetadata_Generic*)pSrcBlock->m_pMetadata;
    12825  for(VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin();
    12826  !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); )
    12827  {
    12828  VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation;
    12829  const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment();
    12830  const VkDeviceSize srcAllocSize = srcSuballocIt->size;
    12831  if(m_AllocationsMoved == maxAllocationsToMove ||
    12832  m_BytesMoved + srcAllocSize > maxBytesToMove)
    12833  {
    12834  end = true;
    12835  break;
    12836  }
    12837  const VkDeviceSize srcAllocOffset = srcSuballocIt->offset;
    12838 
// Try to place it in one of the free spaces from the database.
    12840  size_t freeSpaceInfoIndex;
    12841  VkDeviceSize dstAllocOffset;
    12842  if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize,
    12843  freeSpaceInfoIndex, dstAllocOffset))
    12844  {
    12845  size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
    12846  VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
    12847  VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata;
    12848  VkDeviceSize freeSpaceBlockSize = pFreeSpaceMetadata->GetSize();
    12849 
    12850  // Same block
    12851  if(freeSpaceInfoIndex == srcBlockInfoIndex)
    12852  {
    12853  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
    12854 
    12855  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
    12856 
    12857  VmaSuballocation suballoc = *srcSuballocIt;
    12858  suballoc.offset = dstAllocOffset;
    12859  suballoc.hAllocation->ChangeOffset(dstAllocOffset);
    12860  m_BytesMoved += srcAllocSize;
    12861  ++m_AllocationsMoved;
    12862 
    12863  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
    12864  ++nextSuballocIt;
    12865  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
    12866  srcSuballocIt = nextSuballocIt;
    12867 
    12868  InsertSuballoc(pFreeSpaceMetadata, suballoc);
    12869 
    12870  VmaDefragmentationMove move = {
    12871  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
    12872  srcAllocOffset, dstAllocOffset,
    12873  srcAllocSize };
    12874  moves.push_back(move);
    12875  }
    12876  // Different block
    12877  else
    12878  {
    12879  // MOVE OPTION 2: Move the allocation to a different block.
    12880 
    12881  VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex);
    12882 
    12883  VmaSuballocation suballoc = *srcSuballocIt;
    12884  suballoc.offset = dstAllocOffset;
    12885  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset);
    12886  m_BytesMoved += srcAllocSize;
    12887  ++m_AllocationsMoved;
    12888 
    12889  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
    12890  ++nextSuballocIt;
    12891  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
    12892  srcSuballocIt = nextSuballocIt;
    12893 
    12894  InsertSuballoc(pFreeSpaceMetadata, suballoc);
    12895 
    12896  VmaDefragmentationMove move = {
    12897  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
    12898  srcAllocOffset, dstAllocOffset,
    12899  srcAllocSize };
    12900  moves.push_back(move);
    12901  }
    12902  }
    12903  else
    12904  {
    12905  dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment);
    12906 
// If the allocation doesn't fit before the end of dstBlock, advance to the next block.
    12908  while(dstBlockInfoIndex < srcBlockInfoIndex &&
    12909  dstAllocOffset + srcAllocSize > dstBlockSize)
    12910  {
    12911  // But before that, register remaining free space at the end of dst block.
    12912  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset);
    12913 
    12914  ++dstBlockInfoIndex;
    12915  dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
    12916  pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
    12917  pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
    12918  dstBlockSize = pDstMetadata->GetSize();
    12919  dstOffset = 0;
    12920  dstAllocOffset = 0;
    12921  }
    12922 
    12923  // Same block
    12924  if(dstBlockInfoIndex == srcBlockInfoIndex)
    12925  {
    12926  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
    12927 
    12928  const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset;
    12929 
    12930  bool skipOver = overlap;
    12931  if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset)
    12932  {
// If the destination and source regions overlap, skip the move when it
// would shift the allocation by less than 1/64 of its size.
    12935  skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize;
    12936  }
    12937 
    12938  if(skipOver)
    12939  {
    12940  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset);
    12941 
    12942  dstOffset = srcAllocOffset + srcAllocSize;
    12943  ++srcSuballocIt;
    12944  }
    12945  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
    12946  else
    12947  {
    12948  srcSuballocIt->offset = dstAllocOffset;
    12949  srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset);
    12950  dstOffset = dstAllocOffset + srcAllocSize;
    12951  m_BytesMoved += srcAllocSize;
    12952  ++m_AllocationsMoved;
    12953  ++srcSuballocIt;
    12954  VmaDefragmentationMove move = {
    12955  srcOrigBlockIndex, dstOrigBlockIndex,
    12956  srcAllocOffset, dstAllocOffset,
    12957  srcAllocSize };
    12958  moves.push_back(move);
    12959  }
    12960  }
    12961  // Different block
    12962  else
    12963  {
    12964  // MOVE OPTION 2: Move the allocation to a different block.
    12965 
    12966  VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex);
    12967  VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize);
    12968 
    12969  VmaSuballocation suballoc = *srcSuballocIt;
    12970  suballoc.offset = dstAllocOffset;
    12971  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset);
    12972  dstOffset = dstAllocOffset + srcAllocSize;
    12973  m_BytesMoved += srcAllocSize;
    12974  ++m_AllocationsMoved;
    12975 
    12976  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
    12977  ++nextSuballocIt;
    12978  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
    12979  srcSuballocIt = nextSuballocIt;
    12980 
    12981  pDstMetadata->m_Suballocations.push_back(suballoc);
    12982 
    12983  VmaDefragmentationMove move = {
    12984  srcOrigBlockIndex, dstOrigBlockIndex,
    12985  srcAllocOffset, dstAllocOffset,
    12986  srcAllocSize };
    12987  moves.push_back(move);
    12988  }
    12989  }
    12990  }
    12991  }
    12992 
    12993  m_BlockInfos.clear();
    12994 
    12995  PostprocessMetadata();
    12996 
    12997  return VK_SUCCESS;
    12998 }
    12999 
    13000 void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata()
    13001 {
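// Remove all FREE suballocations and reset the free-space bookkeeping of every
// block. Defragment() then rearranges the remaining (real) suballocations and
// PostprocessMetadata() rebuilds the free list from the resulting layout.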
    13002  const size_t blockCount = m_pBlockVector->GetBlockCount();
    13003  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    13004  {
    13005  VmaBlockMetadata_Generic* const pMetadata =
    13006  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
    13007  pMetadata->m_FreeCount = 0;
    13008  pMetadata->m_SumFreeSize = pMetadata->GetSize();
    13009  pMetadata->m_FreeSuballocationsBySize.clear();
    13010  for(VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
    13011  it != pMetadata->m_Suballocations.end(); )
    13012  {
    13013  if(it->type == VMA_SUBALLOCATION_TYPE_FREE)
    13014  {
    13015  VmaSuballocationList::iterator nextIt = it;
    13016  ++nextIt;
    13017  pMetadata->m_Suballocations.erase(it);
    13018  it = nextIt;
    13019  }
    13020  else
    13021  {
    13022  ++it;
    13023  }
    13024  }
    13025  }
    13026 }
    13027 
    13028 void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata()
    13029 {
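// Rebuild the metadata invariants relaxed by PreprocessMetadata(): re-insert
// FREE suballocations into every gap between the moved allocations, recompute
// free counts and sizes, and re-sort the by-size free list.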
    13030  const size_t blockCount = m_pBlockVector->GetBlockCount();
    13031  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    13032  {
    13033  VmaBlockMetadata_Generic* const pMetadata =
    13034  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
    13035  const VkDeviceSize blockSize = pMetadata->GetSize();
    13036 
    13037  // No allocations in this block - entire area is free.
    13038  if(pMetadata->m_Suballocations.empty())
    13039  {
    13040  pMetadata->m_FreeCount = 1;
    13041  //pMetadata->m_SumFreeSize is already set to blockSize.
    13042  VmaSuballocation suballoc = {
    13043  0, // offset
    13044  blockSize, // size
    13045  VMA_NULL, // hAllocation
    13046  VMA_SUBALLOCATION_TYPE_FREE };
    13047  pMetadata->m_Suballocations.push_back(suballoc);
    13048  pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin());
    13049  }
    13050  // There are some allocations in this block.
    13051  else
    13052  {
    13053  VkDeviceSize offset = 0;
    13054  VmaSuballocationList::iterator it;
    13055  for(it = pMetadata->m_Suballocations.begin();
    13056  it != pMetadata->m_Suballocations.end();
    13057  ++it)
    13058  {
    13059  VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE);
    13060  VMA_ASSERT(it->offset >= offset);
    13061 
    13062  // Need to insert preceding free space.
    13063  if(it->offset > offset)
    13064  {
    13065  ++pMetadata->m_FreeCount;
    13066  const VkDeviceSize freeSize = it->offset - offset;
    13067  VmaSuballocation suballoc = {
    13068  offset, // offset
    13069  freeSize, // size
    13070  VMA_NULL, // hAllocation
    13071  VMA_SUBALLOCATION_TYPE_FREE };
    13072  VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
    13073  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    13074  {
    13075  pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt);
    13076  }
    13077  }
    13078 
    13079  pMetadata->m_SumFreeSize -= it->size;
    13080  offset = it->offset + it->size;
    13081  }
    13082 
    13083  // Need to insert trailing free space.
    13084  if(offset < blockSize)
    13085  {
    13086  ++pMetadata->m_FreeCount;
    13087  const VkDeviceSize freeSize = blockSize - offset;
    13088  VmaSuballocation suballoc = {
    13089  offset, // offset
    13090  freeSize, // size
    13091  VMA_NULL, // hAllocation
    13092  VMA_SUBALLOCATION_TYPE_FREE };
    13093  VMA_ASSERT(it == pMetadata->m_Suballocations.end());
    13094  VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER) // ">=" for consistency with the preceding-free-space case above.
    13096  {
    13097  pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt);
    13098  }
    13099  }
    13100 
    13101  VMA_SORT(
    13102  pMetadata->m_FreeSuballocationsBySize.begin(),
    13103  pMetadata->m_FreeSuballocationsBySize.end(),
    13104  VmaSuballocationItemSizeLess());
    13105  }
    13106 
    13107  VMA_HEAVY_ASSERT(pMetadata->Validate());
    13108  }
    13109 }
    13110 
    13111 void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc)
    13112 {
    13113  // TODO: Optimize somehow. Remember iterator instead of searching for it linearly.
    13114  VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
    13115  while(it != pMetadata->m_Suballocations.end())
    13116  {
if(it->offset < suballoc.offset)
{
++it;
}
else
{
// Found the first suballocation with offset >= suballoc.offset: insert before it.
break;
}
}
    13122  pMetadata->m_Suballocations.insert(it, suballoc);
    13123 }
    13124 
////////////////////////////////////////////////////////////////////////////////
// VmaBlockVectorDefragmentationContext
    13127 
    13128 VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
    13129  VmaAllocator hAllocator,
    13130  VmaPool hCustomPool,
    13131  VmaBlockVector* pBlockVector,
    13132  uint32_t currFrameIndex,
    13133  uint32_t algorithmFlags) :
    13134  res(VK_SUCCESS),
    13135  mutexLocked(false),
    13136  blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
    13137  m_hAllocator(hAllocator),
    13138  m_hCustomPool(hCustomPool),
    13139  m_pBlockVector(pBlockVector),
    13140  m_CurrFrameIndex(currFrameIndex),
    13141  m_AlgorithmFlags(algorithmFlags),
    13142  m_pAlgorithm(VMA_NULL),
    13143  m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
    13144  m_AllAllocations(false)
    13145 {
    13146 }
    13147 
    13148 VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext()
    13149 {
    13150  vma_delete(m_hAllocator, m_pAlgorithm);
    13151 }
    13152 
    13153 void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
    13154 {
    13155  AllocInfo info = { hAlloc, pChanged };
    13156  m_Allocations.push_back(info);
    13157 }
    13158 
    13159 void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported)
    13160 {
    13161  const bool allAllocations = m_AllAllocations ||
    13162  m_Allocations.size() == m_pBlockVector->CalcAllocationCount();
    13163 
    13164  /********************************
    13165  HERE IS THE CHOICE OF DEFRAGMENTATION ALGORITHM.
    13166  ********************************/
    13167 
    13168  /*
    13169  Fast algorithm is supported only when certain criteria are met:
    13170  - VMA_DEBUG_MARGIN is 0.
    13171  - All allocations in this block vector are moveable.
    13172  - There is no possibility of image/buffer granularity conflict.
    13173  */
    13174  if(VMA_DEBUG_MARGIN == 0 &&
    13175  allAllocations &&
    13176  !m_pBlockVector->IsBufferImageGranularityConflictPossible())
    13177  {
    13178  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)(
    13179  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
    13180  }
    13181  else
    13182  {
    13183  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)(
    13184  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
    13185  }
    13186 
    13187  if(allAllocations)
    13188  {
    13189  m_pAlgorithm->AddAll();
    13190  }
    13191  else
    13192  {
    13193  for(size_t i = 0, count = m_Allocations.size(); i < count; ++i)
    13194  {
    13195  m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged);
    13196  }
    13197  }
    13198 }
    13199 
////////////////////////////////////////////////////////////////////////////////
// VmaDefragmentationContext
    13202 
    13203 VmaDefragmentationContext_T::VmaDefragmentationContext_T(
    13204  VmaAllocator hAllocator,
    13205  uint32_t currFrameIndex,
    13206  uint32_t flags,
    13207  VmaDefragmentationStats* pStats) :
    13208  m_hAllocator(hAllocator),
    13209  m_CurrFrameIndex(currFrameIndex),
    13210  m_Flags(flags),
    13211  m_pStats(pStats),
    13212  m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks()))
    13213 {
    13214  memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts));
    13215 }
    13216 
    13217 VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
    13218 {
    13219  for(size_t i = m_CustomPoolContexts.size(); i--; )
    13220  {
    13221  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
    13222  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
    13223  vma_delete(m_hAllocator, pBlockVectorCtx);
    13224  }
    13225  for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
    13226  {
    13227  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
    13228  if(pBlockVectorCtx)
    13229  {
    13230  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
    13231  vma_delete(m_hAllocator, pBlockVectorCtx);
    13232  }
    13233  }
    13234 }
    13235 
    13236 void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, VmaPool* pPools)
    13237 {
    13238  for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
    13239  {
    13240  VmaPool pool = pPools[poolIndex];
    13241  VMA_ASSERT(pool);
    13242  // Pools with algorithm other than default are not defragmented.
    13243  if(pool->m_BlockVector.GetAlgorithm() == 0)
    13244  {
    13245  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
    13246 
    13247  for(size_t i = m_CustomPoolContexts.size(); i--; )
    13248  {
    13249  if(m_CustomPoolContexts[i]->GetCustomPool() == pool)
    13250  {
    13251  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
    13252  break;
    13253  }
    13254  }
    13255 
    13256  if(!pBlockVectorDefragCtx)
    13257  {
    13258  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
    13259  m_hAllocator,
    13260  pool,
    13261  &pool->m_BlockVector,
    13262  m_CurrFrameIndex,
    13263  m_Flags);
    13264  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
    13265  }
    13266 
    13267  pBlockVectorDefragCtx->AddAll();
    13268  }
    13269  }
    13270 }
    13271 
    13272 void VmaDefragmentationContext_T::AddAllocations(
    13273  uint32_t allocationCount,
    13274  VmaAllocation* pAllocations,
    13275  VkBool32* pAllocationsChanged)
    13276 {
// Dispatch pAllocations among the defragmentation contexts. Create them when necessary.
    13278  for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    13279  {
    13280  const VmaAllocation hAlloc = pAllocations[allocIndex];
    13281  VMA_ASSERT(hAlloc);
    13282  // DedicatedAlloc cannot be defragmented.
    13283  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
    13284  // Lost allocation cannot be defragmented.
    13285  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
    13286  {
    13287  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
    13288 
    13289  const VmaPool hAllocPool = hAlloc->GetPool();
// This allocation belongs to a custom pool.
    13291  if(hAllocPool != VK_NULL_HANDLE)
    13292  {
    13293  // Pools with algorithm other than default are not defragmented.
    13294  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
    13295  {
    13296  for(size_t i = m_CustomPoolContexts.size(); i--; )
    13297  {
    13298  if(m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool)
    13299  {
    13300  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
    13301  break;
    13302  }
    13303  }
    13304  if(!pBlockVectorDefragCtx)
    13305  {
    13306  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
    13307  m_hAllocator,
    13308  hAllocPool,
    13309  &hAllocPool->m_BlockVector,
    13310  m_CurrFrameIndex,
    13311  m_Flags);
    13312  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
    13313  }
    13314  }
    13315  }
// This allocation belongs to the default pool.
    13317  else
    13318  {
    13319  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
    13320  pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
    13321  if(!pBlockVectorDefragCtx)
    13322  {
    13323  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
    13324  m_hAllocator,
    13325  VMA_NULL, // hCustomPool
    13326  m_hAllocator->m_pBlockVectors[memTypeIndex],
    13327  m_CurrFrameIndex,
    13328  m_Flags);
    13329  m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx;
    13330  }
    13331  }
    13332 
    13333  if(pBlockVectorDefragCtx)
    13334  {
    13335  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
    13336  &pAllocationsChanged[allocIndex] : VMA_NULL;
    13337  pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged);
    13338  }
    13339  }
    13340  }
    13341 }
    13342 
    13343 VkResult VmaDefragmentationContext_T::Defragment(
    13344  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
    13345  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
    13346  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats)
    13347 {
    13348  if(pStats)
    13349  {
    13350  memset(pStats, 0, sizeof(VmaDefragmentationStats));
    13351  }
    13352 
    13353  if(commandBuffer == VK_NULL_HANDLE)
    13354  {
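// Without a command buffer no transfer commands can be recorded, so the GPU
// path is disabled and only host-visible memory can be defragmented.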
    13355  maxGpuBytesToMove = 0;
    13356  maxGpuAllocationsToMove = 0;
    13357  }
    13358 
    13359  VkResult res = VK_SUCCESS;
    13360 
    13361  // Process default pools.
    13362  for(uint32_t memTypeIndex = 0;
    13363  memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS;
    13364  ++memTypeIndex)
    13365  {
    13366  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
    13367  if(pBlockVectorCtx)
    13368  {
    13369  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
    13370  pBlockVectorCtx->GetBlockVector()->Defragment(
    13371  pBlockVectorCtx,
    13372  pStats,
    13373  maxCpuBytesToMove, maxCpuAllocationsToMove,
    13374  maxGpuBytesToMove, maxGpuAllocationsToMove,
    13375  commandBuffer);
    13376  if(pBlockVectorCtx->res != VK_SUCCESS)
    13377  {
    13378  res = pBlockVectorCtx->res;
    13379  }
    13380  }
    13381  }
    13382 
    13383  // Process custom pools.
    13384  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
    13385  customCtxIndex < customCtxCount && res >= VK_SUCCESS;
    13386  ++customCtxIndex)
    13387  {
    13388  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
    13389  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
    13390  pBlockVectorCtx->GetBlockVector()->Defragment(
    13391  pBlockVectorCtx,
    13392  pStats,
    13393  maxCpuBytesToMove, maxCpuAllocationsToMove,
    13394  maxGpuBytesToMove, maxGpuAllocationsToMove,
    13395  commandBuffer);
    13396  if(pBlockVectorCtx->res != VK_SUCCESS)
    13397  {
    13398  res = pBlockVectorCtx->res;
    13399  }
    13400  }
    13401 
    13402  return res;
    13403 }
    13404 
////////////////////////////////////////////////////////////////////////////////
// VmaRecorder
    13407 
    13408 #if VMA_RECORDING_ENABLED
    13409 
    13410 VmaRecorder::VmaRecorder() :
    13411  m_UseMutex(true),
    13412  m_Flags(0),
    13413  m_File(VMA_NULL),
    13414  m_Freq(INT64_MAX),
    13415  m_StartCounter(INT64_MAX)
    13416 {
    13417 }
    13418 
    13419 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
    13420 {
    13421  m_UseMutex = useMutex;
    13422  m_Flags = settings.flags;
    13423 
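// Capture the performance-counter frequency and start time so that each
// recorded call can be timestamped in seconds relative to initialization.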
    13424  QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
    13425  QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);
    13426 
    13427  // Open file for writing.
    13428  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
    13429  if(err != 0)
    13430  {
    13431  return VK_ERROR_INITIALIZATION_FAILED;
    13432  }
    13433 
    13434  // Write header.
    13435  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
    13436  fprintf(m_File, "%s\n", "1,5");
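// The second header line is the recording file format version, currently 1,5.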
    13437 
    13438  return VK_SUCCESS;
    13439 }
    13440 
    13441 VmaRecorder::~VmaRecorder()
    13442 {
    13443  if(m_File != VMA_NULL)
    13444  {
    13445  fclose(m_File);
    13446  }
    13447 }
    13448 
    13449 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
    13450 {
    13451  CallParams callParams;
    13452  GetBasicParams(callParams);
    13453 
    13454  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13455  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
    13456  Flush();
    13457 }
    13458 
    13459 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
    13460 {
    13461  CallParams callParams;
    13462  GetBasicParams(callParams);
    13463 
    13464  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13465  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
    13466  Flush();
    13467 }
    13468 
    13469 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
    13470 {
    13471  CallParams callParams;
    13472  GetBasicParams(callParams);
    13473 
    13474  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13475  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
    13476  createInfo.memoryTypeIndex,
    13477  createInfo.flags,
    13478  createInfo.blockSize,
    13479  (uint64_t)createInfo.minBlockCount,
    13480  (uint64_t)createInfo.maxBlockCount,
    13481  createInfo.frameInUseCount,
    13482  pool);
    13483  Flush();
    13484 }
    13485 
    13486 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
    13487 {
    13488  CallParams callParams;
    13489  GetBasicParams(callParams);
    13490 
    13491  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13492  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
    13493  pool);
    13494  Flush();
    13495 }
    13496 
    13497 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
    13498  const VkMemoryRequirements& vkMemReq,
    13499  const VmaAllocationCreateInfo& createInfo,
    13500  VmaAllocation allocation)
    13501 {
    13502  CallParams callParams;
    13503  GetBasicParams(callParams);
    13504 
    13505  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13506  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    13507  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    13508  vkMemReq.size,
    13509  vkMemReq.alignment,
    13510  vkMemReq.memoryTypeBits,
    13511  createInfo.flags,
    13512  createInfo.usage,
    13513  createInfo.requiredFlags,
    13514  createInfo.preferredFlags,
    13515  createInfo.memoryTypeBits,
    13516  createInfo.pool,
    13517  allocation,
    13518  userDataStr.GetString());
    13519  Flush();
    13520 }
    13521 
    13522 void VmaRecorder::RecordAllocateMemoryPages(uint32_t frameIndex,
    13523  const VkMemoryRequirements& vkMemReq,
    13524  const VmaAllocationCreateInfo& createInfo,
    13525  uint64_t allocationCount,
    13526  const VmaAllocation* pAllocations)
    13527 {
    13528  CallParams callParams;
    13529  GetBasicParams(callParams);
    13530 
    13531  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13532  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    13533  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryPages,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,", callParams.threadId, callParams.time, frameIndex,
    13534  vkMemReq.size,
    13535  vkMemReq.alignment,
    13536  vkMemReq.memoryTypeBits,
    13537  createInfo.flags,
    13538  createInfo.usage,
    13539  createInfo.requiredFlags,
    13540  createInfo.preferredFlags,
    13541  createInfo.memoryTypeBits,
    13542  createInfo.pool);
    13543  PrintPointerList(allocationCount, pAllocations);
    13544  fprintf(m_File, ",%s\n", userDataStr.GetString());
    13545  Flush();
    13546 }
    13547 
    13548 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
    13549  const VkMemoryRequirements& vkMemReq,
    13550  bool requiresDedicatedAllocation,
    13551  bool prefersDedicatedAllocation,
    13552  const VmaAllocationCreateInfo& createInfo,
    13553  VmaAllocation allocation)
    13554 {
    13555  CallParams callParams;
    13556  GetBasicParams(callParams);
    13557 
    13558  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13559  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    13560  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    13561  vkMemReq.size,
    13562  vkMemReq.alignment,
    13563  vkMemReq.memoryTypeBits,
    13564  requiresDedicatedAllocation ? 1 : 0,
    13565  prefersDedicatedAllocation ? 1 : 0,
    13566  createInfo.flags,
    13567  createInfo.usage,
    13568  createInfo.requiredFlags,
    13569  createInfo.preferredFlags,
    13570  createInfo.memoryTypeBits,
    13571  createInfo.pool,
    13572  allocation,
    13573  userDataStr.GetString());
    13574  Flush();
    13575 }
    13576 
    13577 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
    13578  const VkMemoryRequirements& vkMemReq,
    13579  bool requiresDedicatedAllocation,
    13580  bool prefersDedicatedAllocation,
    13581  const VmaAllocationCreateInfo& createInfo,
    13582  VmaAllocation allocation)
    13583 {
    13584  CallParams callParams;
    13585  GetBasicParams(callParams);
    13586 
    13587  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13588  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    13589  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    13590  vkMemReq.size,
    13591  vkMemReq.alignment,
    13592  vkMemReq.memoryTypeBits,
    13593  requiresDedicatedAllocation ? 1 : 0,
    13594  prefersDedicatedAllocation ? 1 : 0,
    13595  createInfo.flags,
    13596  createInfo.usage,
    13597  createInfo.requiredFlags,
    13598  createInfo.preferredFlags,
    13599  createInfo.memoryTypeBits,
    13600  createInfo.pool,
    13601  allocation,
    13602  userDataStr.GetString());
    13603  Flush();
    13604 }
    13605 
    13606 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
    13607  VmaAllocation allocation)
    13608 {
    13609  CallParams callParams;
    13610  GetBasicParams(callParams);
    13611 
    13612  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13613  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    13614  allocation);
    13615  Flush();
    13616 }
    13617 
    13618 void VmaRecorder::RecordFreeMemoryPages(uint32_t frameIndex,
    13619  uint64_t allocationCount,
    13620  const VmaAllocation* pAllocations)
    13621 {
    13622  CallParams callParams;
    13623  GetBasicParams(callParams);
    13624 
    13625  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13626  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemoryPages,", callParams.threadId, callParams.time, frameIndex);
    13627  PrintPointerList(allocationCount, pAllocations);
    13628  fprintf(m_File, "\n");
    13629  Flush();
    13630 }
    13631 
    13632 void VmaRecorder::RecordResizeAllocation(
    13633  uint32_t frameIndex,
    13634  VmaAllocation allocation,
    13635  VkDeviceSize newSize)
    13636 {
    13637  CallParams callParams;
    13638  GetBasicParams(callParams);
    13639 
    13640  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13641  fprintf(m_File, "%u,%.3f,%u,vmaResizeAllocation,%p,%llu\n", callParams.threadId, callParams.time, frameIndex,
    13642  allocation, newSize);
    13643  Flush();
    13644 }
    13645 
    13646 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
    13647  VmaAllocation allocation,
    13648  const void* pUserData)
    13649 {
    13650  CallParams callParams;
    13651  GetBasicParams(callParams);
    13652 
    13653  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13654  UserDataString userDataStr(
    13655  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
    13656  pUserData);
    13657  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    13658  allocation,
    13659  userDataStr.GetString());
    13660  Flush();
    13661 }
    13662 
    13663 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
    13664  VmaAllocation allocation)
    13665 {
    13666  CallParams callParams;
    13667  GetBasicParams(callParams);
    13668 
    13669  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13670  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
    13671  allocation);
    13672  Flush();
    13673 }
    13674 
    13675 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
    13676  VmaAllocation allocation)
    13677 {
    13678  CallParams callParams;
    13679  GetBasicParams(callParams);
    13680 
    13681  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13682  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    13683  allocation);
    13684  Flush();
    13685 }
    13686 
    13687 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
    13688  VmaAllocation allocation)
    13689 {
    13690  CallParams callParams;
    13691  GetBasicParams(callParams);
    13692 
    13693  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13694  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    13695  allocation);
    13696  Flush();
    13697 }
    13698 
    13699 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
    13700  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    13701 {
    13702  CallParams callParams;
    13703  GetBasicParams(callParams);
    13704 
    13705  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13706  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
    13707  allocation,
    13708  offset,
    13709  size);
    13710  Flush();
    13711 }
    13712 
    13713 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
    13714  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    13715 {
    13716  CallParams callParams;
    13717  GetBasicParams(callParams);
    13718 
    13719  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13720  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
    13721  allocation,
    13722  offset,
    13723  size);
    13724  Flush();
    13725 }
    13726 
    13727 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
    13728  const VkBufferCreateInfo& bufCreateInfo,
    13729  const VmaAllocationCreateInfo& allocCreateInfo,
    13730  VmaAllocation allocation)
    13731 {
    13732  CallParams callParams;
    13733  GetBasicParams(callParams);
    13734 
    13735  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13736  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    13737  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    13738  bufCreateInfo.flags,
    13739  bufCreateInfo.size,
    13740  bufCreateInfo.usage,
    13741  bufCreateInfo.sharingMode,
    13742  allocCreateInfo.flags,
    13743  allocCreateInfo.usage,
    13744  allocCreateInfo.requiredFlags,
    13745  allocCreateInfo.preferredFlags,
    13746  allocCreateInfo.memoryTypeBits,
    13747  allocCreateInfo.pool,
    13748  allocation,
    13749  userDataStr.GetString());
    13750  Flush();
    13751 }
    13752 
    13753 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
    13754  const VkImageCreateInfo& imageCreateInfo,
    13755  const VmaAllocationCreateInfo& allocCreateInfo,
    13756  VmaAllocation allocation)
    13757 {
    13758  CallParams callParams;
    13759  GetBasicParams(callParams);
    13760 
    13761  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13762  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    13763  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    13764  imageCreateInfo.flags,
    13765  imageCreateInfo.imageType,
    13766  imageCreateInfo.format,
    13767  imageCreateInfo.extent.width,
    13768  imageCreateInfo.extent.height,
    13769  imageCreateInfo.extent.depth,
    13770  imageCreateInfo.mipLevels,
    13771  imageCreateInfo.arrayLayers,
    13772  imageCreateInfo.samples,
    13773  imageCreateInfo.tiling,
    13774  imageCreateInfo.usage,
    13775  imageCreateInfo.sharingMode,
    13776  imageCreateInfo.initialLayout,
    13777  allocCreateInfo.flags,
    13778  allocCreateInfo.usage,
    13779  allocCreateInfo.requiredFlags,
    13780  allocCreateInfo.preferredFlags,
    13781  allocCreateInfo.memoryTypeBits,
    13782  allocCreateInfo.pool,
    13783  allocation,
    13784  userDataStr.GetString());
    13785  Flush();
    13786 }
    13787 
    13788 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
    13789  VmaAllocation allocation)
    13790 {
    13791  CallParams callParams;
    13792  GetBasicParams(callParams);
    13793 
    13794  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13795  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
    13796  allocation);
    13797  Flush();
    13798 }
    13799 
    13800 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
    13801  VmaAllocation allocation)
    13802 {
    13803  CallParams callParams;
    13804  GetBasicParams(callParams);
    13805 
    13806  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13807  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
    13808  allocation);
    13809  Flush();
    13810 }
    13811 
    13812 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
    13813  VmaAllocation allocation)
    13814 {
    13815  CallParams callParams;
    13816  GetBasicParams(callParams);
    13817 
    13818  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13819  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
    13820  allocation);
    13821  Flush();
    13822 }
    13823 
    13824 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
    13825  VmaAllocation allocation)
    13826 {
    13827  CallParams callParams;
    13828  GetBasicParams(callParams);
    13829 
    13830  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13831  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
    13832  allocation);
    13833  Flush();
    13834 }
    13835 
    13836 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
    13837  VmaPool pool)
    13838 {
    13839  CallParams callParams;
    13840  GetBasicParams(callParams);
    13841 
    13842  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13843  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
    13844  pool);
    13845  Flush();
    13846 }
    13847 
    13848 void VmaRecorder::RecordDefragmentationBegin(uint32_t frameIndex,
    13849  const VmaDefragmentationInfo2& info,
    13850  VmaDefragmentationContext ctx)
    13851 {
    13852  CallParams callParams;
    13853  GetBasicParams(callParams);
    13854 
    13855  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13856  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationBegin,%u,", callParams.threadId, callParams.time, frameIndex,
    13857  info.flags);
    13858  PrintPointerList(info.allocationCount, info.pAllocations);
    13859  fprintf(m_File, ",");
    13860  PrintPointerList(info.poolCount, info.pPools);
    13861  fprintf(m_File, ",%llu,%u,%llu,%u,%p,%p\n",
    13862  info.maxCpuBytesToMove,
    13863  info.maxCpuAllocationsToMove,
    13864  info.maxGpuBytesToMove,
    13865  info.maxGpuAllocationsToMove,
    13866  info.commandBuffer,
    13867  ctx);
    13868  Flush();
    13869 }
    13870 
    13871 void VmaRecorder::RecordDefragmentationEnd(uint32_t frameIndex,
    13872  VmaDefragmentationContext ctx)
    13873 {
    13874  CallParams callParams;
    13875  GetBasicParams(callParams);
    13876 
    13877  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13878  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationEnd,%p\n", callParams.threadId, callParams.time, frameIndex,
    13879  ctx);
    13880  Flush();
    13881 }
    13882 
    13883 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
    13884 {
    13885  if(pUserData != VMA_NULL)
    13886  {
    13887  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
    13888  {
    13889  m_Str = (const char*)pUserData;
    13890  }
    13891  else
    13892  {
    13893  sprintf_s(m_PtrStr, "%p", pUserData);
    13894  m_Str = m_PtrStr;
    13895  }
    13896  }
    13897  else
    13898  {
    13899  m_Str = "";
    13900  }
    13901 }
    13902 
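          // Writes a "Config" section listing device properties, memory heaps/types
          // and the compile-time VMA_* macros, so a tool reading the trace can check
          // that it was captured in a compatible environment.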
    13903 void VmaRecorder::WriteConfiguration(
    13904  const VkPhysicalDeviceProperties& devProps,
    13905  const VkPhysicalDeviceMemoryProperties& memProps,
    13906  bool dedicatedAllocationExtensionEnabled)
    13907 {
    13908  fprintf(m_File, "Config,Begin\n");
    13909 
    13910  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
    13911  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
    13912  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
    13913  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
    13914  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
    13915  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
    13916 
    13917  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
    13918  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
    13919  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
    13920 
    13921  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
    13922  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
    13923  {
    13924  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
    13925  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
    13926  }
    13927  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
    13928  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
    13929  {
    13930  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
    13931  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
    13932  }
    13933 
    13934  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
    13935 
    13936  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
    13937  fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
    13938  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
    13939  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
    13940  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
    13941  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
    13942  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
    13943  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
    13944  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
    13945 
    13946  fprintf(m_File, "Config,End\n");
    13947 }
    13948 
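          // Timestamps are seconds elapsed since recorder creation. Note this uses
          // GetCurrentThreadId and QueryPerformanceCounter, so the recorder as
          // written is Windows-only.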
    13949 void VmaRecorder::GetBasicParams(CallParams& outParams)
    13950 {
    13951  outParams.threadId = GetCurrentThreadId();
    13952 
    13953  LARGE_INTEGER counter;
    13954  QueryPerformanceCounter(&counter);
    13955  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
    13956 }
    13957 
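          // Writes a list of allocation handles as a single CSV cell, separating the
          // pointers with spaces instead of commas.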
    13958 void VmaRecorder::PrintPointerList(uint64_t count, const VmaAllocation* pItems)
    13959 {
    13960  if(count)
    13961  {
    13962  fprintf(m_File, "%p", pItems[0]);
    13963  for(uint64_t i = 1; i < count; ++i)
    13964  {
    13965  fprintf(m_File, " %p", pItems[i]);
    13966  }
    13967  }
    13968 }
    13969 
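          // Flushes only when VMA_RECORD_FLUSH_AFTER_CALL_BIT was requested, trading
          // some performance for a trace that remains usable if the process crashes.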
    13970 void VmaRecorder::Flush()
    13971 {
    13972  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
    13973  {
    13974  fflush(m_File);
    13975  }
    13976 }
    13977 
    13978 #endif // #if VMA_RECORDING_ENABLED
    13979 
    13980 ////////////////////////////////////////////////////////////////////////////////
    13981 // VmaAllocator_T
    13982 
    13983 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
    13984  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
    13985  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
    13986  m_hDevice(pCreateInfo->device),
    13987  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
    13988  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
    13989  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
    13990  m_PreferredLargeHeapBlockSize(0),
    13991  m_PhysicalDevice(pCreateInfo->physicalDevice),
    13992  m_CurrentFrameIndex(0),
    13993  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
    13994  m_NextPoolId(0)
    13995 #if VMA_RECORDING_ENABLED
    13996  ,m_pRecorder(VMA_NULL)
    13997 #endif
    13998 {
    13999  if(VMA_DEBUG_DETECT_CORRUPTION)
    14000  {
    14001  // Needs to be a multiple of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
    14002  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
    14003  }
    14004 
    14005  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
    14006 
    14007 #if !(VMA_DEDICATED_ALLOCATION)
    14008  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
    14009  {
    14010  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
    14011  }
    14012 #endif
    14013 
    14014  memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
    14015  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
    14016  memset(&m_MemProps, 0, sizeof(m_MemProps));
    14017 
    14018  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
    14019  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
    14020 
    14021  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    14022  {
    14023  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
    14024  }
    14025 
    14026  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
    14027  {
    14028  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
    14029  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
    14030  }
    14031 
    14032  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
    14033 
    14034  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
    14035  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
    14036 
    14037  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
    14038  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
    14039  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
    14040  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
    14041 
    14042  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
    14043  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
    14044 
    14045  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
    14046  {
    14047  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
    14048  {
    14049  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
    14050  if(limit != VK_WHOLE_SIZE)
    14051  {
    14052  m_HeapSizeLimit[heapIndex] = limit;
    14053  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
    14054  {
    14055  m_MemProps.memoryHeaps[heapIndex].size = limit;
    14056  }
    14057  }
    14058  }
    14059  }
    14060 
    14061  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    14062  {
    14063  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
    14064 
    14065  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
    14066  this,
    14067  memTypeIndex,
    14068  preferredBlockSize,
    14069  0,
    14070  SIZE_MAX,
    14071  GetBufferImageGranularity(),
    14072  pCreateInfo->frameInUseCount,
    14073  false, // isCustomPool
    14074  false, // explicitBlockSize
    14075  false); // linearAlgorithm
    14076  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
    14077  // because minBlockCount is 0.
    14078  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
    14079 
    14080  }
    14081 }
    14082 
    14083 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
    14084 {
    14085  VkResult res = VK_SUCCESS;
    14086 
    14087  if(pCreateInfo->pRecordSettings != VMA_NULL &&
    14088  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
    14089  {
    14090 #if VMA_RECORDING_ENABLED
    14091  m_pRecorder = vma_new(this, VmaRecorder)();
    14092  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
    14093  if(res != VK_SUCCESS)
    14094  {
    14095  return res;
    14096  }
    14097  m_pRecorder->WriteConfiguration(
    14098  m_PhysicalDeviceProperties,
    14099  m_MemProps,
    14100  m_UseKhrDedicatedAllocation);
    14101  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
    14102 #else
    14103  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
    14104  return VK_ERROR_FEATURE_NOT_PRESENT;
    14105 #endif
    14106  }
    14107 
    14108  return res;
    14109 }
    14110 
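          // Illustrative sketch (not part of this file): how an application might
          // enable recording. `physicalDevice` and `device` are assumed to be valid
          // handles created by the application.
          //
          //   VmaRecordSettings recordSettings = {};
          //   recordSettings.flags = VMA_RECORD_FLUSH_AFTER_CALL_BIT;
          //   recordSettings.pFilePath = "allocations.csv";
          //
          //   VmaAllocatorCreateInfo allocatorInfo = {};
          //   allocatorInfo.physicalDevice = physicalDevice;
          //   allocatorInfo.device = device;
          //   allocatorInfo.pRecordSettings = &recordSettings;
          //
          //   VmaAllocator allocator;
          //   VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);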
    14111 VmaAllocator_T::~VmaAllocator_T()
    14112 {
    14113 #if VMA_RECORDING_ENABLED
    14114  if(m_pRecorder != VMA_NULL)
    14115  {
    14116  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
    14117  vma_delete(this, m_pRecorder);
    14118  }
    14119 #endif
    14120 
    14121  VMA_ASSERT(m_Pools.empty());
    14122 
    14123  for(size_t i = GetMemoryTypeCount(); i--; )
    14124  {
    14125  vma_delete(this, m_pDedicatedAllocations[i]);
    14126  vma_delete(this, m_pBlockVectors[i]);
    14127  }
    14128 }
    14129 
    14130 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
    14131 {
    14132 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
    14133  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
    14134  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
    14135  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    14136  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
    14137  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
    14138  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
    14139  m_VulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
    14140  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
    14141  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
    14142  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
    14143  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
    14144  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
    14145  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
    14146  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
    14147  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
    14148  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
    14149  m_VulkanFunctions.vkCmdCopyBuffer = &vkCmdCopyBuffer;
    14150 #if VMA_DEDICATED_ALLOCATION
    14151  if(m_UseKhrDedicatedAllocation)
    14152  {
    14153  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
    14154  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
    14155  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
    14156  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
    14157  }
    14158 #endif // #if VMA_DEDICATED_ALLOCATION
    14159 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
    14160 
    14161 #define VMA_COPY_IF_NOT_NULL(funcName) \
    14162  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
    14163 
    14164  if(pVulkanFunctions != VMA_NULL)
    14165  {
    14166  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
    14167  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
    14168  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
    14169  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
    14170  VMA_COPY_IF_NOT_NULL(vkMapMemory);
    14171  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
    14172  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
    14173  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
    14174  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
    14175  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
    14176  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
    14177  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
    14178  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
    14179  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
    14180  VMA_COPY_IF_NOT_NULL(vkCreateImage);
    14181  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
    14182  VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
    14183 #if VMA_DEDICATED_ALLOCATION
    14184  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
    14185  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
    14186 #endif
    14187  }
    14188 
    14189 #undef VMA_COPY_IF_NOT_NULL
    14190 
    14191  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
    14192  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
    14193  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    14194  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    14195  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    14196  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    14197  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    14198  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    14199  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
    14200  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
    14201  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    14202  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    14203  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    14204  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    14205  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    14206  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    14207  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    14208  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
    14209  VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
    14210 #if VMA_DEDICATED_ALLOCATION
    14211  if(m_UseKhrDedicatedAllocation)
    14212  {
    14213  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
    14214  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
    14215  }
    14216 #endif
    14217 }
    14218 
    14219 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
    14220 {
    14221  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    14222  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
    14223  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
    14224  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
    14225 }
    14226 
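          // Allocation strategy for one memory type: prefer a dedicated allocation
          // when VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT is set or the size
          // heuristic below triggers; otherwise sub-allocate from the type's block
          // vector and fall back to dedicated memory if that fails.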
    14227 VkResult VmaAllocator_T::AllocateMemoryOfType(
    14228  VkDeviceSize size,
    14229  VkDeviceSize alignment,
    14230  bool dedicatedAllocation,
    14231  VkBuffer dedicatedBuffer,
    14232  VkImage dedicatedImage,
    14233  const VmaAllocationCreateInfo& createInfo,
    14234  uint32_t memTypeIndex,
    14235  VmaSuballocationType suballocType,
    14236  size_t allocationCount,
    14237  VmaAllocation* pAllocations)
    14238 {
    14239  VMA_ASSERT(pAllocations != VMA_NULL);
    14240  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);
    14241 
    14242  VmaAllocationCreateInfo finalCreateInfo = createInfo;
    14243 
    14244  // If memory type is not HOST_VISIBLE, disable MAPPED.
    14245  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    14246  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    14247  {
    14248  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
    14249  }
    14250 
    14251  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
    14252  VMA_ASSERT(blockVector);
    14253 
    14254  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
    14255  bool preferDedicatedMemory =
    14256  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
    14257  dedicatedAllocation ||
    14258  // Heuristics: Allocate dedicated memory if requested size is greater than half of preferred block size.
    14259  size > preferredBlockSize / 2;
    14260 
    14261  if(preferDedicatedMemory &&
    14262  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
    14263  finalCreateInfo.pool == VK_NULL_HANDLE)
    14264  {
    14265  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
    14266  }
    14267 
    14268  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
    14269  {
    14270  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    14271  {
    14272  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14273  }
    14274  else
    14275  {
    14276  return AllocateDedicatedMemory(
    14277  size,
    14278  suballocType,
    14279  memTypeIndex,
    14280  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    14281  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    14282  finalCreateInfo.pUserData,
    14283  dedicatedBuffer,
    14284  dedicatedImage,
    14285  allocationCount,
    14286  pAllocations);
    14287  }
    14288  }
    14289  else
    14290  {
    14291  VkResult res = blockVector->Allocate(
    14292  VK_NULL_HANDLE, // hCurrentPool
    14293  m_CurrentFrameIndex.load(),
    14294  size,
    14295  alignment,
    14296  finalCreateInfo,
    14297  suballocType,
    14298  allocationCount,
    14299  pAllocations);
    14300  if(res == VK_SUCCESS)
    14301  {
    14302  return res;
    14303  }
    14304 
    14305  // Try dedicated memory.
    14306  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    14307  {
    14308  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14309  }
    14310  else
    14311  {
    14312  res = AllocateDedicatedMemory(
    14313  size,
    14314  suballocType,
    14315  memTypeIndex,
    14316  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    14317  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    14318  finalCreateInfo.pUserData,
    14319  dedicatedBuffer,
    14320  dedicatedImage,
    14321  allocationCount,
    14322  pAllocations);
    14323  if(res == VK_SUCCESS)
    14324  {
    14325  // Succeeded: AllocateDedicatedMemory function already filled pAllocations, nothing more to do here.
    14326  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
    14327  return VK_SUCCESS;
    14328  }
    14329  else
    14330  {
    14331  // Everything failed: Return error code.
    14332  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    14333  return res;
    14334  }
    14335  }
    14336  }
    14337 }
    14338 
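          // Allocates allocationCount separate VkDeviceMemory blocks. With
          // VK_KHR_dedicated_allocation enabled, the buffer or image handle is
          // chained into VkMemoryAllocateInfo::pNext. On partial failure all pages
          // allocated so far are freed, so the call is all-or-nothing.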
    14339 VkResult VmaAllocator_T::AllocateDedicatedMemory(
    14340  VkDeviceSize size,
    14341  VmaSuballocationType suballocType,
    14342  uint32_t memTypeIndex,
    14343  bool map,
    14344  bool isUserDataString,
    14345  void* pUserData,
    14346  VkBuffer dedicatedBuffer,
    14347  VkImage dedicatedImage,
    14348  size_t allocationCount,
    14349  VmaAllocation* pAllocations)
    14350 {
    14351  VMA_ASSERT(allocationCount > 0 && pAllocations);
    14352 
    14353  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    14354  allocInfo.memoryTypeIndex = memTypeIndex;
    14355  allocInfo.allocationSize = size;
    14356 
    14357 #if VMA_DEDICATED_ALLOCATION
    14358  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
    14359  if(m_UseKhrDedicatedAllocation)
    14360  {
    14361  if(dedicatedBuffer != VK_NULL_HANDLE)
    14362  {
    14363  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
    14364  dedicatedAllocInfo.buffer = dedicatedBuffer;
    14365  allocInfo.pNext = &dedicatedAllocInfo;
    14366  }
    14367  else if(dedicatedImage != VK_NULL_HANDLE)
    14368  {
    14369  dedicatedAllocInfo.image = dedicatedImage;
    14370  allocInfo.pNext = &dedicatedAllocInfo;
    14371  }
    14372  }
    14373 #endif // #if VMA_DEDICATED_ALLOCATION
    14374 
    14375  size_t allocIndex;
    14376  VkResult res;
    14377  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    14378  {
    14379  res = AllocateDedicatedMemoryPage(
    14380  size,
    14381  suballocType,
    14382  memTypeIndex,
    14383  allocInfo,
    14384  map,
    14385  isUserDataString,
    14386  pUserData,
    14387  pAllocations + allocIndex);
    14388  if(res != VK_SUCCESS)
    14389  {
    14390  break;
    14391  }
    14392  }
    14393 
    14394  if(res == VK_SUCCESS)
    14395  {
    14396  // Register them in m_pDedicatedAllocations.
    14397  {
    14398  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    14399  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
    14400  VMA_ASSERT(pDedicatedAllocations);
    14401  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    14402  {
    14403  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, pAllocations[allocIndex]);
    14404  }
    14405  }
    14406 
    14407  VMA_DEBUG_LOG(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
    14408  }
    14409  else
    14410  {
    14411  // Free all already created allocations.
    14412  while(allocIndex--)
    14413  {
    14414  VmaAllocation currAlloc = pAllocations[allocIndex];
    14415  VkDeviceMemory hMemory = currAlloc->GetMemory();
    14416 
    14417  /*
    14418  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
    14419  before vkFreeMemory.
    14420 
    14421  if(currAlloc->GetMappedData() != VMA_NULL)
    14422  {
    14423  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    14424  }
    14425  */
    14426 
    14427  FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);
    14428 
    14429  currAlloc->SetUserData(this, VMA_NULL);
    14430  vma_delete(this, currAlloc);
    14431  }
    14432 
    14433  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
    14434  }
    14435 
    14436  return res;
    14437 }
    14438 
    14439 VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
    14440  VkDeviceSize size,
    14441  VmaSuballocationType suballocType,
    14442  uint32_t memTypeIndex,
    14443  const VkMemoryAllocateInfo& allocInfo,
    14444  bool map,
    14445  bool isUserDataString,
    14446  void* pUserData,
    14447  VmaAllocation* pAllocation)
    14448 {
    14449  VkDeviceMemory hMemory = VK_NULL_HANDLE;
    14450  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
    14451  if(res < 0)
    14452  {
    14453  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    14454  return res;
    14455  }
    14456 
    14457  void* pMappedData = VMA_NULL;
    14458  if(map)
    14459  {
    14460  res = (*m_VulkanFunctions.vkMapMemory)(
    14461  m_hDevice,
    14462  hMemory,
    14463  0,
    14464  VK_WHOLE_SIZE,
    14465  0,
    14466  &pMappedData);
    14467  if(res < 0)
    14468  {
    14469  VMA_DEBUG_LOG(" vkMapMemory FAILED");
    14470  FreeVulkanMemory(memTypeIndex, size, hMemory);
    14471  return res;
    14472  }
    14473  }
    14474 
    14475  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
    14476  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
    14477  (*pAllocation)->SetUserData(this, pUserData);
    14478  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    14479  {
    14480  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    14481  }
    14482 
    14483  return VK_SUCCESS;
    14484 }
    14485 
    14486 void VmaAllocator_T::GetBufferMemoryRequirements(
    14487  VkBuffer hBuffer,
    14488  VkMemoryRequirements& memReq,
    14489  bool& requiresDedicatedAllocation,
    14490  bool& prefersDedicatedAllocation) const
    14491 {
    14492 #if VMA_DEDICATED_ALLOCATION
    14493  if(m_UseKhrDedicatedAllocation)
    14494  {
    14495  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
    14496  memReqInfo.buffer = hBuffer;
    14497 
    14498  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    14499 
    14500  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    14501  memReq2.pNext = &memDedicatedReq;
    14502 
    14503  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    14504 
    14505  memReq = memReq2.memoryRequirements;
    14506  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    14507  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    14508  }
    14509  else
    14510 #endif // #if VMA_DEDICATED_ALLOCATION
    14511  {
    14512  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
    14513  requiresDedicatedAllocation = false;
    14514  prefersDedicatedAllocation = false;
    14515  }
    14516 }
    14517 
    14518 void VmaAllocator_T::GetImageMemoryRequirements(
    14519  VkImage hImage,
    14520  VkMemoryRequirements& memReq,
    14521  bool& requiresDedicatedAllocation,
    14522  bool& prefersDedicatedAllocation) const
    14523 {
    14524 #if VMA_DEDICATED_ALLOCATION
    14525  if(m_UseKhrDedicatedAllocation)
    14526  {
    14527  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
    14528  memReqInfo.image = hImage;
    14529 
    14530  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    14531 
    14532  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    14533  memReq2.pNext = &memDedicatedReq;
    14534 
    14535  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    14536 
    14537  memReq = memReq2.memoryRequirements;
    14538  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    14539  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    14540  }
    14541  else
    14542 #endif // #if VMA_DEDICATED_ALLOCATION
    14543  {
    14544  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
    14545  requiresDedicatedAllocation = false;
    14546  prefersDedicatedAllocation = false;
    14547  }
    14548 }
    14549 
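          // Top-level allocation entry point: validates flag combinations, routes
          // pool allocations to the pool's block vector, and otherwise tries
          // compatible memory types one by one, masking out each type that fails
          // until none remain.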
    14550 VkResult VmaAllocator_T::AllocateMemory(
    14551  const VkMemoryRequirements& vkMemReq,
    14552  bool requiresDedicatedAllocation,
    14553  bool prefersDedicatedAllocation,
    14554  VkBuffer dedicatedBuffer,
    14555  VkImage dedicatedImage,
    14556  const VmaAllocationCreateInfo& createInfo,
    14557  VmaSuballocationType suballocType,
    14558  size_t allocationCount,
    14559  VmaAllocation* pAllocations)
    14560 {
    14561  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
    14562 
    14563  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
    14564 
    14565  if(vkMemReq.size == 0)
    14566  {
    14567  return VK_ERROR_VALIDATION_FAILED_EXT;
    14568  }
    14569  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
    14570  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    14571  {
    14572  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
    14573  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14574  }
    14575  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    14576  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
    14577  {
    14578  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
    14579  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14580  }
    14581  if(requiresDedicatedAllocation)
    14582  {
    14583  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    14584  {
    14585  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
    14586  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14587  }
    14588  if(createInfo.pool != VK_NULL_HANDLE)
    14589  {
    14590  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
    14591  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14592  }
    14593  }
    14594  if((createInfo.pool != VK_NULL_HANDLE) &&
    14595  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
    14596  {
    14597  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
    14598  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14599  }
    14600 
    14601  if(createInfo.pool != VK_NULL_HANDLE)
    14602  {
    14603  const VkDeviceSize alignmentForPool = VMA_MAX(
    14604  vkMemReq.alignment,
    14605  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
    14606  return createInfo.pool->m_BlockVector.Allocate(
    14607  createInfo.pool,
    14608  m_CurrentFrameIndex.load(),
    14609  vkMemReq.size,
    14610  alignmentForPool,
    14611  createInfo,
    14612  suballocType,
    14613  allocationCount,
    14614  pAllocations);
    14615  }
    14616  else
    14617  {
    14618  // Bit mask of Vulkan memory types acceptable for this allocation.
    14619  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
    14620  uint32_t memTypeIndex = UINT32_MAX;
    14621  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    14622  if(res == VK_SUCCESS)
    14623  {
    14624  VkDeviceSize alignmentForMemType = VMA_MAX(
    14625  vkMemReq.alignment,
    14626  GetMemoryTypeMinAlignment(memTypeIndex));
    14627 
    14628  res = AllocateMemoryOfType(
    14629  vkMemReq.size,
    14630  alignmentForMemType,
    14631  requiresDedicatedAllocation || prefersDedicatedAllocation,
    14632  dedicatedBuffer,
    14633  dedicatedImage,
    14634  createInfo,
    14635  memTypeIndex,
    14636  suballocType,
    14637  allocationCount,
    14638  pAllocations);
    14639  // Succeeded on first try.
    14640  if(res == VK_SUCCESS)
    14641  {
    14642  return res;
    14643  }
    14644  // Allocation from this memory type failed. Try other compatible memory types.
    14645  else
    14646  {
    14647  for(;;)
    14648  {
    14649  // Remove old memTypeIndex from list of possibilities.
    14650  memoryTypeBits &= ~(1u << memTypeIndex);
    14651  // Find alternative memTypeIndex.
    14652  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    14653  if(res == VK_SUCCESS)
    14654  {
    14655  alignmentForMemType = VMA_MAX(
    14656  vkMemReq.alignment,
    14657  GetMemoryTypeMinAlignment(memTypeIndex));
    14658 
    14659  res = AllocateMemoryOfType(
    14660  vkMemReq.size,
    14661  alignmentForMemType,
    14662  requiresDedicatedAllocation || prefersDedicatedAllocation,
    14663  dedicatedBuffer,
    14664  dedicatedImage,
    14665  createInfo,
    14666  memTypeIndex,
    14667  suballocType,
    14668  allocationCount,
    14669  pAllocations);
    14670  // Allocation from this alternative memory type succeeded.
    14671  if(res == VK_SUCCESS)
    14672  {
    14673  return res;
    14674  }
    14675  // else: Allocation from this memory type failed. Try next one - next loop iteration.
    14676  }
    14677  // No other matching memory type index could be found.
    14678  else
    14679  {
    14680  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
    14681  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14682  }
    14683  }
    14684  }
    14685  }
    14686  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
    14687  else
    14688  return res;
    14689  }
    14690 }
    14691 
    14692 void VmaAllocator_T::FreeMemory(
    14693  size_t allocationCount,
    14694  const VmaAllocation* pAllocations)
    14695 {
    14696  VMA_ASSERT(pAllocations);
    14697 
    14698  for(size_t allocIndex = allocationCount; allocIndex--; )
    14699  {
    14700  VmaAllocation allocation = pAllocations[allocIndex];
    14701 
    14702  if(allocation != VK_NULL_HANDLE)
    14703  {
    14704  if(TouchAllocation(allocation))
    14705  {
    14706  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    14707  {
    14708  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
    14709  }
    14710 
    14711  switch(allocation->GetType())
    14712  {
    14713  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    14714  {
    14715  VmaBlockVector* pBlockVector = VMA_NULL;
    14716  VmaPool hPool = allocation->GetPool();
    14717  if(hPool != VK_NULL_HANDLE)
    14718  {
    14719  pBlockVector = &hPool->m_BlockVector;
    14720  }
    14721  else
    14722  {
    14723  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    14724  pBlockVector = m_pBlockVectors[memTypeIndex];
    14725  }
    14726  pBlockVector->Free(allocation);
    14727  }
    14728  break;
    14729  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    14730  FreeDedicatedMemory(allocation);
    14731  break;
    14732  default:
    14733  VMA_ASSERT(0);
    14734  }
    14735  }
    14736 
    14737  allocation->SetUserData(this, VMA_NULL);
    14738  vma_delete(this, allocation);
    14739  }
    14740  }
    14741 }
    14742 
    14743 VkResult VmaAllocator_T::ResizeAllocation(
    14744  const VmaAllocation alloc,
    14745  VkDeviceSize newSize)
    14746 {
    14747  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
    14748  {
    14749  return VK_ERROR_VALIDATION_FAILED_EXT;
    14750  }
    14751  if(newSize == alloc->GetSize())
    14752  {
    14753  return VK_SUCCESS;
    14754  }
    14755 
    14756  switch(alloc->GetType())
    14757  {
    14758  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    14759  return VK_ERROR_FEATURE_NOT_PRESENT;
    14760  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    14761  if(alloc->GetBlock()->m_pMetadata->ResizeAllocation(alloc, newSize))
    14762  {
    14763  alloc->ChangeSize(newSize);
    14764  VMA_HEAVY_ASSERT(alloc->GetBlock()->m_pMetadata->Validate());
    14765  return VK_SUCCESS;
    14766  }
    14767  else
    14768  {
    14769  return VK_ERROR_OUT_OF_POOL_MEMORY;
    14770  }
    14771  default:
    14772  VMA_ASSERT(0);
    14773  return VK_ERROR_VALIDATION_FAILED_EXT;
    14774  }
    14775 }
    14776 
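          // Aggregates statistics from three sources: the default per-memory-type
          // block vectors, all custom pools, and the dedicated allocations.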
    14777 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
    14778 {
    14779  // Initialize.
    14780  InitStatInfo(pStats->total);
    14781  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
    14782  InitStatInfo(pStats->memoryType[i]);
    14783  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    14784  InitStatInfo(pStats->memoryHeap[i]);
    14785 
    14786  // Process default pools.
    14787  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    14788  {
    14789  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    14790  VMA_ASSERT(pBlockVector);
    14791  pBlockVector->AddStats(pStats);
    14792  }
    14793 
    14794  // Process custom pools.
    14795  {
    14796  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
    14797  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    14798  {
    14799  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
    14800  }
    14801  }
    14802 
    14803  // Process dedicated allocations.
    14804  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    14805  {
    14806  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    14807  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    14808  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
    14809  VMA_ASSERT(pDedicatedAllocVector);
    14810  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
    14811  {
    14812  VmaStatInfo allocationStatInfo;
    14813  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
    14814  VmaAddStatInfo(pStats->total, allocationStatInfo);
    14815  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    14816  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    14817  }
    14818  }
    14819 
    14820  // Postprocess.
    14821  VmaPostprocessCalcStatInfo(pStats->total);
    14822  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
    14823  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
    14824  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
    14825  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
    14826 }
    14827 
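          // 4098 == 0x1002, AMD's PCI vendor ID as reported in
          // VkPhysicalDeviceProperties::vendorID.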
    14828 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
    14829 
    14830 VkResult VmaAllocator_T::DefragmentationBegin(
    14831  const VmaDefragmentationInfo2& info,
    14832  VmaDefragmentationStats* pStats,
    14833  VmaDefragmentationContext* pContext)
    14834 {
    14835  if(info.pAllocationsChanged != VMA_NULL)
    14836  {
    14837  memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32));
    14838  }
    14839 
    14840  *pContext = vma_new(this, VmaDefragmentationContext_T)(
    14841  this, m_CurrentFrameIndex.load(), info.flags, pStats);
    14842 
    14843  (*pContext)->AddPools(info.poolCount, info.pPools);
    14844  (*pContext)->AddAllocations(
    14845  info.allocationCount, info.pAllocations, info.pAllocationsChanged);
    14846 
    14847  VkResult res = (*pContext)->Defragment(
    14848  info.maxCpuBytesToMove, info.maxCpuAllocationsToMove,
    14849  info.maxGpuBytesToMove, info.maxGpuAllocationsToMove,
    14850  info.commandBuffer, pStats);
    14851 
    14852  if(res != VK_NOT_READY)
    14853  {
    14854  vma_delete(this, *pContext);
    14855  *pContext = VMA_NULL;
    14856  }
    14857 
    14858  return res;
    14859 }
    14860 
    14861 VkResult VmaAllocator_T::DefragmentationEnd(
    14862  VmaDefragmentationContext context)
    14863 {
    14864  vma_delete(this, context);
    14865  return VK_SUCCESS;
    14866 }
    14867 
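          // For allocations that can become lost, the last-use frame index is
          // advanced to the current frame with a compare-exchange loop so that
          // concurrent readers can never move it backwards.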
    14868 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
    14869 {
    14870  if(hAllocation->CanBecomeLost())
    14871  {
    14872  /*
    14873  Warning: This is a carefully designed algorithm.
    14874  Do not modify unless you really know what you're doing :)
    14875  */
    14876  const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
    14877  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
    14878  for(;;)
    14879  {
    14880  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
    14881  {
    14882  pAllocationInfo->memoryType = UINT32_MAX;
    14883  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
    14884  pAllocationInfo->offset = 0;
    14885  pAllocationInfo->size = hAllocation->GetSize();
    14886  pAllocationInfo->pMappedData = VMA_NULL;
    14887  pAllocationInfo->pUserData = hAllocation->GetUserData();
    14888  return;
    14889  }
    14890  else if(localLastUseFrameIndex == localCurrFrameIndex)
    14891  {
    14892  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
    14893  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
    14894  pAllocationInfo->offset = hAllocation->GetOffset();
    14895  pAllocationInfo->size = hAllocation->GetSize();
    14896  pAllocationInfo->pMappedData = VMA_NULL;
    14897  pAllocationInfo->pUserData = hAllocation->GetUserData();
    14898  return;
    14899  }
    14900  else // Last use time earlier than current time.
    14901  {
    14902  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
    14903  {
    14904  localLastUseFrameIndex = localCurrFrameIndex;
    14905  }
    14906  }
    14907  }
    14908  }
    14909  else
    14910  {
    14911 #if VMA_STATS_STRING_ENABLED
    14912  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
    14913  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
    14914  for(;;)
    14915  {
    14916  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
    14917  if(localLastUseFrameIndex == localCurrFrameIndex)
    14918  {
    14919  break;
    14920  }
    14921  else // Last use time earlier than current time.
    14922  {
    14923  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
    14924  {
    14925  localLastUseFrameIndex = localCurrFrameIndex;
    14926  }
    14927  }
    14928  }
    14929 #endif
    14930 
    14931  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
    14932  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
    14933  pAllocationInfo->offset = hAllocation->GetOffset();
    14934  pAllocationInfo->size = hAllocation->GetSize();
    14935  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
    14936  pAllocationInfo->pUserData = hAllocation->GetUserData();
    14937  }
    14938 }
    14939 
    14940 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
    14941 {
    14942  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
    14943  if(hAllocation->CanBecomeLost())
    14944  {
    14945  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
    14946  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
    14947  for(;;)
    14948  {
    14949  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
    14950  {
    14951  return false;
    14952  }
    14953  else if(localLastUseFrameIndex == localCurrFrameIndex)
    14954  {
    14955  return true;
    14956  }
    14957  else // Last use time earlier than current time.
    14958  {
    14959  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
    14960  {
    14961  localLastUseFrameIndex = localCurrFrameIndex;
    14962  }
    14963  }
    14964  }
    14965  }
    14966  else
    14967  {
    14968 #if VMA_STATS_STRING_ENABLED
    14969  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
    14970  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
    14971  for(;;)
    14972  {
    14973  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
    14974  if(localLastUseFrameIndex == localCurrFrameIndex)
    14975  {
    14976  break;
    14977  }
    14978  else // Last use time earlier than current time.
    14979  {
    14980  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
    14981  {
    14982  localLastUseFrameIndex = localCurrFrameIndex;
    14983  }
    14984  }
    14985  }
    14986 #endif
    14987 
    14988  return true;
    14989  }
    14990 }
    14991 
    14992 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
    14993 {
    14994  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
    14995 
    14996  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
    14997 
    14998  if(newCreateInfo.maxBlockCount == 0)
    14999  {
    15000  newCreateInfo.maxBlockCount = SIZE_MAX;
    15001  }
    15002  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
    15003  {
    15004  return VK_ERROR_INITIALIZATION_FAILED;
    15005  }
    15006 
    15007  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
    15008 
    15009  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
    15010 
    15011  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
    15012  if(res != VK_SUCCESS)
    15013  {
    15014  vma_delete(this, *pPool);
    15015  *pPool = VMA_NULL;
    15016  return res;
    15017  }
    15018 
    15019  // Add to m_Pools.
    15020  {
    15021  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
    15022  (*pPool)->SetId(m_NextPoolId++);
    15023  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
    15024  }
    15025 
    15026  return VK_SUCCESS;
    15027 }
    15028 
    15029 void VmaAllocator_T::DestroyPool(VmaPool pool)
    15030 {
    15031  // Remove from m_Pools.
    15032  {
    15033  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
    15034  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
    15035  VMA_ASSERT(success && "Pool not found in Allocator.");
    15036  }
    15037 
    15038  vma_delete(this, pool);
    15039 }
    15040 
    15041 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
    15042 {
    15043  pool->m_BlockVector.GetPoolStats(pPoolStats);
    15044 }
    15045 
    15046 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
    15047 {
    15048  m_CurrentFrameIndex.store(frameIndex);
    15049 }
    15050 
    15051 void VmaAllocator_T::MakePoolAllocationsLost(
    15052  VmaPool hPool,
    15053  size_t* pLostAllocationCount)
    15054 {
    15055  hPool->m_BlockVector.MakePoolAllocationsLost(
    15056  m_CurrentFrameIndex.load(),
    15057  pLostAllocationCount);
    15058 }
    15059 
    15060 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
    15061 {
    15062  return hPool->m_BlockVector.CheckCorruption();
    15063 }
    15064 
    15065 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
    15066 {
    15067  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
    15068 
    15069  // Process default pools.
    15070  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    15071  {
    15072  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
    15073  {
    15074  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    15075  VMA_ASSERT(pBlockVector);
    15076  VkResult localRes = pBlockVector->CheckCorruption();
    15077  switch(localRes)
    15078  {
    15079  case VK_ERROR_FEATURE_NOT_PRESENT:
    15080  break;
    15081  case VK_SUCCESS:
    15082  finalRes = VK_SUCCESS;
    15083  break;
    15084  default:
    15085  return localRes;
    15086  }
    15087  }
    15088  }
    15089 
    15090  // Process custom pools.
    15091  {
    15092  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
    15093  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    15094  {
    15095  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
    15096  {
    15097  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
    15098  switch(localRes)
    15099  {
    15100  case VK_ERROR_FEATURE_NOT_PRESENT:
    15101  break;
    15102  case VK_SUCCESS:
    15103  finalRes = VK_SUCCESS;
    15104  break;
    15105  default:
    15106  return localRes;
    15107  }
    15108  }
    15109  }
    15110  }
    15111 
    15112  return finalRes;
    15113 }
    15114 
    15115 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
    15116 {
    15117  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
    15118  (*pAllocation)->InitLost();
    15119 }
    15120 
    15121 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
    15122 {
    15123  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
    15124 
    15125  VkResult res;
    15126  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    15127  {
    15128  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    15129  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
    15130  {
    15131  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    15132  if(res == VK_SUCCESS)
    15133  {
    15134  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
    15135  }
    15136  }
    15137  else
    15138  {
    15139  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
    15140  }
    15141  }
    15142  else
    15143  {
    15144  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    15145  }
    15146 
    15147  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
    15148  {
    15149  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
    15150  }
    15151 
    15152  return res;
    15153 }
    15154 
    15155 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
    15156 {
    15157  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
    15158  {
    15159  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
    15160  }
    15161 
    15162  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
    15163 
    15164  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
    15165  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    15166  {
    15167  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    15168  m_HeapSizeLimit[heapIndex] += size;
    15169  }
    15170 }
    15171 
    15172 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
    15173 {
    15174  if(hAllocation->CanBecomeLost())
    15175  {
    15176  return VK_ERROR_MEMORY_MAP_FAILED;
    15177  }
    15178 
    15179  switch(hAllocation->GetType())
    15180  {
    15181  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    15182  {
    15183  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    15184  char *pBytes = VMA_NULL;
    15185  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
    15186  if(res == VK_SUCCESS)
    15187  {
    15188  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
    15189  hAllocation->BlockAllocMap();
    15190  }
    15191  return res;
    15192  }
    15193  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    15194  return hAllocation->DedicatedAllocMap(this, ppData);
    15195  default:
    15196  VMA_ASSERT(0);
    15197  return VK_ERROR_MEMORY_MAP_FAILED;
    15198  }
    15199 }
    15200 
    15201 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
    15202 {
    15203  switch(hAllocation->GetType())
    15204  {
    15205  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    15206  {
    15207  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    15208  hAllocation->BlockAllocUnmap();
    15209  pBlock->Unmap(this, 1);
    15210  }
    15211  break;
    15212  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    15213  hAllocation->DedicatedAllocUnmap(this);
    15214  break;
    15215  default:
    15216  VMA_ASSERT(0);
    15217  }
    15218 }
    15219 
    15220 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
    15221 {
    15222  VkResult res = VK_SUCCESS;
    15223  switch(hAllocation->GetType())
    15224  {
    15225  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    15226  res = GetVulkanFunctions().vkBindBufferMemory(
    15227  m_hDevice,
    15228  hBuffer,
    15229  hAllocation->GetMemory(),
    15230  0); //memoryOffset
    15231  break;
    15232  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    15233  {
    15234  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    15235  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
    15236  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
    15237  break;
    15238  }
    15239  default:
    15240  VMA_ASSERT(0);
    15241  }
    15242  return res;
    15243 }
    15244 
    15245 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
    15246 {
    15247  VkResult res = VK_SUCCESS;
    15248  switch(hAllocation->GetType())
    15249  {
    15250  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    15251  res = GetVulkanFunctions().vkBindImageMemory(
    15252  m_hDevice,
    15253  hImage,
    15254  hAllocation->GetMemory(),
    15255  0); //memoryOffset
    15256  break;
    15257  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    15258  {
    15259  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    15260  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
    15261  res = pBlock->BindImageMemory(this, hAllocation, hImage);
    15262  break;
    15263  }
    15264  default:
    15265  VMA_ASSERT(0);
    15266  }
    15267  return res;
    15268 }
    15269 
    15270 void VmaAllocator_T::FlushOrInvalidateAllocation(
    15271  VmaAllocation hAllocation,
    15272  VkDeviceSize offset, VkDeviceSize size,
    15273  VMA_CACHE_OPERATION op)
    15274 {
    15275  const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
    15276  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
    15277  {
    15278  const VkDeviceSize allocationSize = hAllocation->GetSize();
    15279  VMA_ASSERT(offset <= allocationSize);
    15280 
    15281  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
    15282 
    15283  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
    15284  memRange.memory = hAllocation->GetMemory();
    15285 
    15286  switch(hAllocation->GetType())
    15287  {
    15288  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    15289  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
    15290  if(size == VK_WHOLE_SIZE)
    15291  {
    15292  memRange.size = allocationSize - memRange.offset;
    15293  }
    15294  else
    15295  {
    15296  VMA_ASSERT(offset + size <= allocationSize);
    15297  memRange.size = VMA_MIN(
    15298  VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
    15299  allocationSize - memRange.offset);
    15300  }
    15301  break;
    15302 
    15303  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    15304  {
    15305  // 1. Still within this allocation.
    15306  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
    15307  if(size == VK_WHOLE_SIZE)
    15308  {
    15309  size = allocationSize - offset;
    15310  }
    15311  else
    15312  {
    15313  VMA_ASSERT(offset + size <= allocationSize);
    15314  }
    15315  memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);
    15316 
    15317  // 2. Adjust to whole block.
    15318  const VkDeviceSize allocationOffset = hAllocation->GetOffset();
    15319  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
    15320  const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
    15321  memRange.offset += allocationOffset;
    15322  memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);
    15323 
    15324  break;
    15325  }
    15326 
    15327  default:
    15328  VMA_ASSERT(0);
    15329  }
    15330 
    15331  switch(op)
    15332  {
    15333  case VMA_CACHE_FLUSH:
    15334  (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
    15335  break;
    15336  case VMA_CACHE_INVALIDATE:
    15337  (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
    15338  break;
    15339  default:
    15340  VMA_ASSERT(0);
    15341  }
    15342  }
    15343  // else: Just ignore this call.
    15344 }
    15345 
    15346 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
    15347 {
    15348  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
    15349 
    15350  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    15351  {
    15352  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    15353  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
    15354  VMA_ASSERT(pDedicatedAllocations);
    15355  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
    15356  VMA_ASSERT(success);
    15357  }
    15358 
    15359  VkDeviceMemory hMemory = allocation->GetMemory();
    15360 
    15361  /*
     15362  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
     15363  before vkFreeMemory.
    15364 
    15365  if(allocation->GetMappedData() != VMA_NULL)
    15366  {
    15367  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    15368  }
    15369  */
    15370 
    15371  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
    15372 
    15373  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
    15374 }
    15375 
    15376 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
    15377 {
    15378  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
    15379  !hAllocation->CanBecomeLost() &&
    15380  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    15381  {
    15382  void* pData = VMA_NULL;
    15383  VkResult res = Map(hAllocation, &pData);
    15384  if(res == VK_SUCCESS)
    15385  {
    15386  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
    15387  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
    15388  Unmap(hAllocation);
    15389  }
    15390  else
    15391  {
    15392  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
    15393  }
    15394  }
    15395 }
    15396 
    15397 #if VMA_STATS_STRING_ENABLED
    15398 
    15399 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
    15400 {
    15401  bool dedicatedAllocationsStarted = false;
    15402  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    15403  {
    15404  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    15405  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
    15406  VMA_ASSERT(pDedicatedAllocVector);
    15407  if(pDedicatedAllocVector->empty() == false)
    15408  {
    15409  if(dedicatedAllocationsStarted == false)
    15410  {
    15411  dedicatedAllocationsStarted = true;
    15412  json.WriteString("DedicatedAllocations");
    15413  json.BeginObject();
    15414  }
    15415 
    15416  json.BeginString("Type ");
    15417  json.ContinueString(memTypeIndex);
    15418  json.EndString();
    15419 
    15420  json.BeginArray();
    15421 
    15422  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
    15423  {
    15424  json.BeginObject(true);
    15425  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
    15426  hAlloc->PrintParameters(json);
    15427  json.EndObject();
    15428  }
    15429 
    15430  json.EndArray();
    15431  }
    15432  }
    15433  if(dedicatedAllocationsStarted)
    15434  {
    15435  json.EndObject();
    15436  }
    15437 
    15438  {
    15439  bool allocationsStarted = false;
    15440  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    15441  {
    15442  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
    15443  {
    15444  if(allocationsStarted == false)
    15445  {
    15446  allocationsStarted = true;
    15447  json.WriteString("DefaultPools");
    15448  json.BeginObject();
    15449  }
    15450 
    15451  json.BeginString("Type ");
    15452  json.ContinueString(memTypeIndex);
    15453  json.EndString();
    15454 
    15455  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
    15456  }
    15457  }
    15458  if(allocationsStarted)
    15459  {
    15460  json.EndObject();
    15461  }
    15462  }
    15463 
    15464  // Custom pools
    15465  {
    15466  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
    15467  const size_t poolCount = m_Pools.size();
    15468  if(poolCount > 0)
    15469  {
    15470  json.WriteString("Pools");
    15471  json.BeginObject();
    15472  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
    15473  {
    15474  json.BeginString();
    15475  json.ContinueString(m_Pools[poolIndex]->GetId());
    15476  json.EndString();
    15477 
    15478  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
    15479  }
    15480  json.EndObject();
    15481  }
    15482  }
    15483 }
    15484 
    15485 #endif // #if VMA_STATS_STRING_ENABLED
    15486 
     15487 ////////////////////////////////////////////////////////////////////////////////
     15488 // Public interface
    15489 
    15490 VkResult vmaCreateAllocator(
    15491  const VmaAllocatorCreateInfo* pCreateInfo,
    15492  VmaAllocator* pAllocator)
    15493 {
    15494  VMA_ASSERT(pCreateInfo && pAllocator);
    15495  VMA_DEBUG_LOG("vmaCreateAllocator");
    15496  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
    15497  return (*pAllocator)->Init(pCreateInfo);
    15498 }
    15499 
    15500 void vmaDestroyAllocator(
    15501  VmaAllocator allocator)
    15502 {
    15503  if(allocator != VK_NULL_HANDLE)
    15504  {
    15505  VMA_DEBUG_LOG("vmaDestroyAllocator");
    15506  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
    15507  vma_delete(&allocationCallbacks, allocator);
    15508  }
    15509 }
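
A minimal usage sketch for this create/destroy pair, assuming `physicalDevice` and `device` were created by the application beforehand:

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice; // assumed VkPhysicalDevice
    allocatorInfo.device = device;                 // assumed VkDevice

    VmaAllocator allocator = VK_NULL_HANDLE;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
    // ... allocate and free memory through `allocator` ...
    vmaDestroyAllocator(allocator); // accepts VK_NULL_HANDLE, so safe even on failure
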
    15510 
     15511 void vmaGetPhysicalDeviceProperties(
     15512  VmaAllocator allocator,
    15513  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
    15514 {
    15515  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
    15516  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
    15517 }
    15518 
     15519 void vmaGetMemoryProperties(
     15520  VmaAllocator allocator,
    15521  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
    15522 {
    15523  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
    15524  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
    15525 }
    15526 
     15527 void vmaGetMemoryTypeProperties(
     15528  VmaAllocator allocator,
    15529  uint32_t memoryTypeIndex,
    15530  VkMemoryPropertyFlags* pFlags)
    15531 {
    15532  VMA_ASSERT(allocator && pFlags);
    15533  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
    15534  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
    15535 }
    15536 
     15537 void vmaSetCurrentFrameIndex(
     15538  VmaAllocator allocator,
    15539  uint32_t frameIndex)
    15540 {
    15541  VMA_ASSERT(allocator);
    15542  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
    15543 
    15544  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    15545 
    15546  allocator->SetCurrentFrameIndex(frameIndex);
    15547 }
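
The frame index recorded here drives the lost-allocation bookkeeping in GetAllocationInfo/TouchAllocation above; a sketch of the expected once-per-frame call, where the counter is application-side:

    static uint32_t frameIndex = 0;                 // application counter (assumed)
    vmaSetCurrentFrameIndex(allocator, frameIndex);
    ++frameIndex;                                   // must never reach VMA_FRAME_INDEX_LOST
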
    15548 
    15549 void vmaCalculateStats(
    15550  VmaAllocator allocator,
    15551  VmaStats* pStats)
    15552 {
    15553  VMA_ASSERT(allocator && pStats);
    15554  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    15555  allocator->CalculateStats(pStats);
    15556 }
    15557 
    15558 #if VMA_STATS_STRING_ENABLED
    15559 
    15560 void vmaBuildStatsString(
    15561  VmaAllocator allocator,
    15562  char** ppStatsString,
    15563  VkBool32 detailedMap)
    15564 {
    15565  VMA_ASSERT(allocator && ppStatsString);
    15566  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    15567 
    15568  VmaStringBuilder sb(allocator);
    15569  {
    15570  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
    15571  json.BeginObject();
    15572 
    15573  VmaStats stats;
    15574  allocator->CalculateStats(&stats);
    15575 
    15576  json.WriteString("Total");
    15577  VmaPrintStatInfo(json, stats.total);
    15578 
    15579  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
    15580  {
    15581  json.BeginString("Heap ");
    15582  json.ContinueString(heapIndex);
    15583  json.EndString();
    15584  json.BeginObject();
    15585 
    15586  json.WriteString("Size");
    15587  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
    15588 
    15589  json.WriteString("Flags");
    15590  json.BeginArray(true);
    15591  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
    15592  {
    15593  json.WriteString("DEVICE_LOCAL");
    15594  }
    15595  json.EndArray();
    15596 
    15597  if(stats.memoryHeap[heapIndex].blockCount > 0)
    15598  {
    15599  json.WriteString("Stats");
    15600  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
    15601  }
    15602 
    15603  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
    15604  {
    15605  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
    15606  {
    15607  json.BeginString("Type ");
    15608  json.ContinueString(typeIndex);
    15609  json.EndString();
    15610 
    15611  json.BeginObject();
    15612 
    15613  json.WriteString("Flags");
    15614  json.BeginArray(true);
    15615  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
    15616  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
    15617  {
    15618  json.WriteString("DEVICE_LOCAL");
    15619  }
    15620  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    15621  {
    15622  json.WriteString("HOST_VISIBLE");
    15623  }
    15624  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
    15625  {
    15626  json.WriteString("HOST_COHERENT");
    15627  }
    15628  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
    15629  {
    15630  json.WriteString("HOST_CACHED");
    15631  }
    15632  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
    15633  {
    15634  json.WriteString("LAZILY_ALLOCATED");
    15635  }
    15636  json.EndArray();
    15637 
    15638  if(stats.memoryType[typeIndex].blockCount > 0)
    15639  {
    15640  json.WriteString("Stats");
    15641  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
    15642  }
    15643 
    15644  json.EndObject();
    15645  }
    15646  }
    15647 
    15648  json.EndObject();
    15649  }
    15650  if(detailedMap == VK_TRUE)
    15651  {
    15652  allocator->PrintDetailedMap(json);
    15653  }
    15654 
    15655  json.EndObject();
    15656  }
    15657 
    15658  const size_t len = sb.GetLength();
    15659  char* const pChars = vma_new_array(allocator, char, len + 1);
    15660  if(len > 0)
    15661  {
    15662  memcpy(pChars, sb.GetData(), len);
    15663  }
    15664  pChars[len] = '\0';
    15665  *ppStatsString = pChars;
    15666 }
    15667 
    15668 void vmaFreeStatsString(
    15669  VmaAllocator allocator,
    15670  char* pStatsString)
    15671 {
    15672  if(pStatsString != VMA_NULL)
    15673  {
    15674  VMA_ASSERT(allocator);
    15675  size_t len = strlen(pStatsString);
    15676  vma_delete_array(allocator, pStatsString, len + 1);
    15677  }
    15678 }
    15679 
    15680 #endif // #if VMA_STATS_STRING_ENABLED
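
With VMA_STATS_STRING_ENABLED defined, the pair above is used as follows (a sketch; the returned JSON must be released with vmaFreeStatsString, never free()):

    char* statsString = nullptr;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE => include detailed map
    printf("%s\n", statsString);                           // assumes <cstdio>
    vmaFreeStatsString(allocator, statsString);
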
    15681 
    15682 /*
    15683 This function is not protected by any mutex because it just reads immutable data.
    15684 */
    15685 VkResult vmaFindMemoryTypeIndex(
    15686  VmaAllocator allocator,
    15687  uint32_t memoryTypeBits,
    15688  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    15689  uint32_t* pMemoryTypeIndex)
    15690 {
    15691  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    15692  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    15693  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    15694 
    15695  if(pAllocationCreateInfo->memoryTypeBits != 0)
    15696  {
    15697  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
    15698  }
    15699 
    15700  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
    15701  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
    15702 
    15703  const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    15704  if(mapped)
    15705  {
    15706  preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    15707  }
    15708 
    15709  // Convert usage to requiredFlags and preferredFlags.
    15710  switch(pAllocationCreateInfo->usage)
    15711  {
     15712  case VMA_MEMORY_USAGE_UNKNOWN:
     15713  break;
     15714  case VMA_MEMORY_USAGE_GPU_ONLY:
     15715  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
     15716  {
     15717  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
     15718  }
     15719  break;
     15720  case VMA_MEMORY_USAGE_CPU_ONLY:
     15721  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
     15722  break;
     15723  case VMA_MEMORY_USAGE_CPU_TO_GPU:
     15724  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
     15725  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
     15726  {
     15727  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
     15728  }
     15729  break;
     15730  case VMA_MEMORY_USAGE_GPU_TO_CPU:
     15731  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    15732  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
    15733  break;
    15734  default:
    15735  break;
    15736  }
    15737 
    15738  *pMemoryTypeIndex = UINT32_MAX;
    15739  uint32_t minCost = UINT32_MAX;
    15740  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
    15741  memTypeIndex < allocator->GetMemoryTypeCount();
    15742  ++memTypeIndex, memTypeBit <<= 1)
    15743  {
    15744  // This memory type is acceptable according to memoryTypeBits bitmask.
    15745  if((memTypeBit & memoryTypeBits) != 0)
    15746  {
    15747  const VkMemoryPropertyFlags currFlags =
    15748  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
    15749  // This memory type contains requiredFlags.
    15750  if((requiredFlags & ~currFlags) == 0)
    15751  {
    15752  // Calculate cost as number of bits from preferredFlags not present in this memory type.
    15753  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
    15754  // Remember memory type with lowest cost.
    15755  if(currCost < minCost)
    15756  {
    15757  *pMemoryTypeIndex = memTypeIndex;
    15758  if(currCost == 0)
    15759  {
    15760  return VK_SUCCESS;
    15761  }
    15762  minCost = currCost;
    15763  }
    15764  }
    15765  }
    15766  }
    15767  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
    15768 }
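
A caller-side sketch of the cost-based search above, asking for a host-visible type suitable for staging; passing UINT32_MAX as memoryTypeBits accepts every type (normally the mask comes from vkGetBufferMemoryRequirements):

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY; // requires HOST_VISIBLE | HOST_COHERENT above

    uint32_t memTypeIndex = UINT32_MAX;
    VkResult res = vmaFindMemoryTypeIndex(allocator, UINT32_MAX, &allocCreateInfo, &memTypeIndex);
    // VK_SUCCESS => memTypeIndex is the type with the fewest missing preferred flags.
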
    15769 
     15770 VkResult vmaFindMemoryTypeIndexForBufferInfo(
     15771  VmaAllocator allocator,
    15772  const VkBufferCreateInfo* pBufferCreateInfo,
    15773  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    15774  uint32_t* pMemoryTypeIndex)
    15775 {
    15776  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    15777  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
    15778  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    15779  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    15780 
    15781  const VkDevice hDev = allocator->m_hDevice;
    15782  VkBuffer hBuffer = VK_NULL_HANDLE;
    15783  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
    15784  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
    15785  if(res == VK_SUCCESS)
    15786  {
    15787  VkMemoryRequirements memReq = {};
    15788  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
    15789  hDev, hBuffer, &memReq);
    15790 
    15791  res = vmaFindMemoryTypeIndex(
    15792  allocator,
    15793  memReq.memoryTypeBits,
    15794  pAllocationCreateInfo,
    15795  pMemoryTypeIndex);
    15796 
    15797  allocator->GetVulkanFunctions().vkDestroyBuffer(
    15798  hDev, hBuffer, allocator->GetAllocationCallbacks());
    15799  }
    15800  return res;
    15801 }
    15802 
     15803 VkResult vmaFindMemoryTypeIndexForImageInfo(
     15804  VmaAllocator allocator,
    15805  const VkImageCreateInfo* pImageCreateInfo,
    15806  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    15807  uint32_t* pMemoryTypeIndex)
    15808 {
    15809  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    15810  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
    15811  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    15812  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    15813 
    15814  const VkDevice hDev = allocator->m_hDevice;
    15815  VkImage hImage = VK_NULL_HANDLE;
    15816  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
    15817  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
    15818  if(res == VK_SUCCESS)
    15819  {
    15820  VkMemoryRequirements memReq = {};
    15821  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
    15822  hDev, hImage, &memReq);
    15823 
    15824  res = vmaFindMemoryTypeIndex(
    15825  allocator,
    15826  memReq.memoryTypeBits,
    15827  pAllocationCreateInfo,
    15828  pMemoryTypeIndex);
    15829 
    15830  allocator->GetVulkanFunctions().vkDestroyImage(
    15831  hDev, hImage, allocator->GetAllocationCallbacks());
    15832  }
    15833  return res;
    15834 }
    15835 
    15836 VkResult vmaCreatePool(
    15837  VmaAllocator allocator,
    15838  const VmaPoolCreateInfo* pCreateInfo,
    15839  VmaPool* pPool)
    15840 {
    15841  VMA_ASSERT(allocator && pCreateInfo && pPool);
    15842 
    15843  VMA_DEBUG_LOG("vmaCreatePool");
    15844 
    15845  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    15846 
    15847  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
    15848 
    15849 #if VMA_RECORDING_ENABLED
    15850  if(allocator->GetRecorder() != VMA_NULL)
    15851  {
    15852  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
    15853  }
    15854 #endif
    15855 
    15856  return res;
    15857 }
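
A sketch tying vmaFindMemoryTypeIndex to vmaCreatePool; the block sizes are purely illustrative:

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;  // e.g. from vmaFindMemoryTypeIndex
    poolCreateInfo.blockSize = 16ull * 1024 * 1024; // 16 MiB per block (illustrative)
    poolCreateInfo.minBlockCount = 1;               // pre-create one block
    poolCreateInfo.maxBlockCount = 0;               // 0 = unlimited (SIZE_MAX internally, as above)

    VmaPool pool = VK_NULL_HANDLE;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
    // ... route allocations into it via VmaAllocationCreateInfo::pool ...
    vmaDestroyPool(allocator, pool);
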
    15858 
    15859 void vmaDestroyPool(
    15860  VmaAllocator allocator,
    15861  VmaPool pool)
    15862 {
    15863  VMA_ASSERT(allocator);
    15864 
    15865  if(pool == VK_NULL_HANDLE)
    15866  {
    15867  return;
    15868  }
    15869 
    15870  VMA_DEBUG_LOG("vmaDestroyPool");
    15871 
    15872  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    15873 
    15874 #if VMA_RECORDING_ENABLED
    15875  if(allocator->GetRecorder() != VMA_NULL)
    15876  {
    15877  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
    15878  }
    15879 #endif
    15880 
    15881  allocator->DestroyPool(pool);
    15882 }
    15883 
    15884 void vmaGetPoolStats(
    15885  VmaAllocator allocator,
    15886  VmaPool pool,
    15887  VmaPoolStats* pPoolStats)
    15888 {
    15889  VMA_ASSERT(allocator && pool && pPoolStats);
    15890 
    15891  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    15892 
    15893  allocator->GetPoolStats(pool, pPoolStats);
    15894 }
    15895 
     15896 void vmaMakePoolAllocationsLost(
     15897  VmaAllocator allocator,
    15898  VmaPool pool,
    15899  size_t* pLostAllocationCount)
    15900 {
    15901  VMA_ASSERT(allocator && pool);
    15902 
    15903  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    15904 
    15905 #if VMA_RECORDING_ENABLED
    15906  if(allocator->GetRecorder() != VMA_NULL)
    15907  {
    15908  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
    15909  }
    15910 #endif
    15911 
    15912  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
    15913 }
    15914 
    15915 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
    15916 {
    15917  VMA_ASSERT(allocator && pool);
    15918 
    15919  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    15920 
    15921  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
    15922 
    15923  return allocator->CheckPoolCorruption(pool);
    15924 }
    15925 
    15926 VkResult vmaAllocateMemory(
    15927  VmaAllocator allocator,
    15928  const VkMemoryRequirements* pVkMemoryRequirements,
    15929  const VmaAllocationCreateInfo* pCreateInfo,
    15930  VmaAllocation* pAllocation,
    15931  VmaAllocationInfo* pAllocationInfo)
    15932 {
    15933  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
    15934 
    15935  VMA_DEBUG_LOG("vmaAllocateMemory");
    15936 
    15937  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    15938 
    15939  VkResult result = allocator->AllocateMemory(
    15940  *pVkMemoryRequirements,
    15941  false, // requiresDedicatedAllocation
    15942  false, // prefersDedicatedAllocation
    15943  VK_NULL_HANDLE, // dedicatedBuffer
    15944  VK_NULL_HANDLE, // dedicatedImage
    15945  *pCreateInfo,
    15946  VMA_SUBALLOCATION_TYPE_UNKNOWN,
    15947  1, // allocationCount
    15948  pAllocation);
    15949 
    15950 #if VMA_RECORDING_ENABLED
    15951  if(allocator->GetRecorder() != VMA_NULL)
    15952  {
    15953  allocator->GetRecorder()->RecordAllocateMemory(
    15954  allocator->GetCurrentFrameIndex(),
    15955  *pVkMemoryRequirements,
    15956  *pCreateInfo,
    15957  *pAllocation);
    15958  }
    15959 #endif
    15960 
    15961  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
    15962  {
    15963  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    15964  }
    15965 
    15966  return result;
    15967 }
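
A sketch of this lowest-level entry point; in practice the VkMemoryRequirements come from a real buffer or image, and here they are filled in by hand only for illustration:

    VkMemoryRequirements memReq = {};
    memReq.size = 65536;                // illustrative
    memReq.alignment = 256;             // illustrative
    memReq.memoryTypeBits = UINT32_MAX; // accept any memory type

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VmaAllocation allocation = VK_NULL_HANDLE;
    VmaAllocationInfo allocInfo = {};
    VkResult res = vmaAllocateMemory(allocator, &memReq, &allocCreateInfo, &allocation, &allocInfo);
    // On success, allocInfo.deviceMemory and allocInfo.offset identify the backing range.
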
    15968 
    15969 VkResult vmaAllocateMemoryPages(
    15970  VmaAllocator allocator,
    15971  const VkMemoryRequirements* pVkMemoryRequirements,
    15972  const VmaAllocationCreateInfo* pCreateInfo,
    15973  size_t allocationCount,
    15974  VmaAllocation* pAllocations,
    15975  VmaAllocationInfo* pAllocationInfo)
    15976 {
    15977  if(allocationCount == 0)
    15978  {
    15979  return VK_SUCCESS;
    15980  }
    15981 
    15982  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);
    15983 
    15984  VMA_DEBUG_LOG("vmaAllocateMemoryPages");
    15985 
    15986  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    15987 
    15988  VkResult result = allocator->AllocateMemory(
    15989  *pVkMemoryRequirements,
    15990  false, // requiresDedicatedAllocation
    15991  false, // prefersDedicatedAllocation
    15992  VK_NULL_HANDLE, // dedicatedBuffer
    15993  VK_NULL_HANDLE, // dedicatedImage
    15994  *pCreateInfo,
    15995  VMA_SUBALLOCATION_TYPE_UNKNOWN,
    15996  allocationCount,
    15997  pAllocations);
    15998 
    15999 #if VMA_RECORDING_ENABLED
    16000  if(allocator->GetRecorder() != VMA_NULL)
    16001  {
    16002  allocator->GetRecorder()->RecordAllocateMemoryPages(
    16003  allocator->GetCurrentFrameIndex(),
    16004  *pVkMemoryRequirements,
    16005  *pCreateInfo,
    16006  (uint64_t)allocationCount,
    16007  pAllocations);
    16008  }
    16009 #endif
    16010 
    16011  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
    16012  {
    16013  for(size_t i = 0; i < allocationCount; ++i)
    16014  {
    16015  allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
    16016  }
    16017  }
    16018 
    16019  return result;
    16020 }
    16021 
     16022 VkResult vmaAllocateMemoryForBuffer(
     16023  VmaAllocator allocator,
    16024  VkBuffer buffer,
    16025  const VmaAllocationCreateInfo* pCreateInfo,
    16026  VmaAllocation* pAllocation,
    16027  VmaAllocationInfo* pAllocationInfo)
    16028 {
    16029  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    16030 
    16031  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
    16032 
    16033  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16034 
    16035  VkMemoryRequirements vkMemReq = {};
    16036  bool requiresDedicatedAllocation = false;
    16037  bool prefersDedicatedAllocation = false;
    16038  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
    16039  requiresDedicatedAllocation,
    16040  prefersDedicatedAllocation);
    16041 
    16042  VkResult result = allocator->AllocateMemory(
    16043  vkMemReq,
    16044  requiresDedicatedAllocation,
    16045  prefersDedicatedAllocation,
    16046  buffer, // dedicatedBuffer
    16047  VK_NULL_HANDLE, // dedicatedImage
    16048  *pCreateInfo,
    16049  VMA_SUBALLOCATION_TYPE_BUFFER,
    16050  1, // allocationCount
    16051  pAllocation);
    16052 
    16053 #if VMA_RECORDING_ENABLED
    16054  if(allocator->GetRecorder() != VMA_NULL)
    16055  {
    16056  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
    16057  allocator->GetCurrentFrameIndex(),
    16058  vkMemReq,
    16059  requiresDedicatedAllocation,
    16060  prefersDedicatedAllocation,
    16061  *pCreateInfo,
    16062  *pAllocation);
    16063  }
    16064 #endif
    16065 
    16066  if(pAllocationInfo && result == VK_SUCCESS)
    16067  {
    16068  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    16069  }
    16070 
    16071  return result;
    16072 }
    16073 
    16074 VkResult vmaAllocateMemoryForImage(
    16075  VmaAllocator allocator,
    16076  VkImage image,
    16077  const VmaAllocationCreateInfo* pCreateInfo,
    16078  VmaAllocation* pAllocation,
    16079  VmaAllocationInfo* pAllocationInfo)
    16080 {
    16081  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    16082 
    16083  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
    16084 
    16085  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16086 
    16087  VkMemoryRequirements vkMemReq = {};
    16088  bool requiresDedicatedAllocation = false;
    16089  bool prefersDedicatedAllocation = false;
    16090  allocator->GetImageMemoryRequirements(image, vkMemReq,
    16091  requiresDedicatedAllocation, prefersDedicatedAllocation);
    16092 
    16093  VkResult result = allocator->AllocateMemory(
    16094  vkMemReq,
    16095  requiresDedicatedAllocation,
    16096  prefersDedicatedAllocation,
    16097  VK_NULL_HANDLE, // dedicatedBuffer
    16098  image, // dedicatedImage
    16099  *pCreateInfo,
    16100  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
    16101  1, // allocationCount
    16102  pAllocation);
    16103 
    16104 #if VMA_RECORDING_ENABLED
    16105  if(allocator->GetRecorder() != VMA_NULL)
    16106  {
    16107  allocator->GetRecorder()->RecordAllocateMemoryForImage(
    16108  allocator->GetCurrentFrameIndex(),
    16109  vkMemReq,
    16110  requiresDedicatedAllocation,
    16111  prefersDedicatedAllocation,
    16112  *pCreateInfo,
    16113  *pAllocation);
    16114  }
    16115 #endif
    16116 
    16117  if(pAllocationInfo && result == VK_SUCCESS)
    16118  {
    16119  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    16120  }
    16121 
    16122  return result;
    16123 }
    16124 
    16125 void vmaFreeMemory(
    16126  VmaAllocator allocator,
    16127  VmaAllocation allocation)
    16128 {
    16129  VMA_ASSERT(allocator);
    16130 
    16131  if(allocation == VK_NULL_HANDLE)
    16132  {
    16133  return;
    16134  }
    16135 
    16136  VMA_DEBUG_LOG("vmaFreeMemory");
    16137 
    16138  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16139 
    16140 #if VMA_RECORDING_ENABLED
    16141  if(allocator->GetRecorder() != VMA_NULL)
    16142  {
    16143  allocator->GetRecorder()->RecordFreeMemory(
    16144  allocator->GetCurrentFrameIndex(),
    16145  allocation);
    16146  }
    16147 #endif
    16148 
    16149  allocator->FreeMemory(
    16150  1, // allocationCount
    16151  &allocation);
    16152 }
    16153 
    16154 void vmaFreeMemoryPages(
    16155  VmaAllocator allocator,
    16156  size_t allocationCount,
    16157  VmaAllocation* pAllocations)
    16158 {
    16159  if(allocationCount == 0)
    16160  {
    16161  return;
    16162  }
    16163 
    16164  VMA_ASSERT(allocator);
    16165 
    16166  VMA_DEBUG_LOG("vmaFreeMemoryPages");
    16167 
    16168  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16169 
    16170 #if VMA_RECORDING_ENABLED
    16171  if(allocator->GetRecorder() != VMA_NULL)
    16172  {
    16173  allocator->GetRecorder()->RecordFreeMemoryPages(
    16174  allocator->GetCurrentFrameIndex(),
    16175  (uint64_t)allocationCount,
    16176  pAllocations);
    16177  }
    16178 #endif
    16179 
    16180  allocator->FreeMemory(allocationCount, pAllocations);
    16181 }
    16182 
    16183 VkResult vmaResizeAllocation(
    16184  VmaAllocator allocator,
    16185  VmaAllocation allocation,
    16186  VkDeviceSize newSize)
    16187 {
    16188  VMA_ASSERT(allocator && allocation);
    16189 
    16190  VMA_DEBUG_LOG("vmaResizeAllocation");
    16191 
    16192  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16193 
    16194 #if VMA_RECORDING_ENABLED
    16195  if(allocator->GetRecorder() != VMA_NULL)
    16196  {
    16197  allocator->GetRecorder()->RecordResizeAllocation(
    16198  allocator->GetCurrentFrameIndex(),
    16199  allocation,
    16200  newSize);
    16201  }
    16202 #endif
    16203 
    16204  return allocator->ResizeAllocation(allocation, newSize);
    16205 }
    16206 
     16207 void vmaGetAllocationInfo(
     16208  VmaAllocator allocator,
    16209  VmaAllocation allocation,
    16210  VmaAllocationInfo* pAllocationInfo)
    16211 {
    16212  VMA_ASSERT(allocator && allocation && pAllocationInfo);
    16213 
    16214  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16215 
    16216 #if VMA_RECORDING_ENABLED
    16217  if(allocator->GetRecorder() != VMA_NULL)
    16218  {
    16219  allocator->GetRecorder()->RecordGetAllocationInfo(
    16220  allocator->GetCurrentFrameIndex(),
    16221  allocation);
    16222  }
    16223 #endif
    16224 
    16225  allocator->GetAllocationInfo(allocation, pAllocationInfo);
    16226 }
    16227 
    16228 VkBool32 vmaTouchAllocation(
    16229  VmaAllocator allocator,
    16230  VmaAllocation allocation)
    16231 {
    16232  VMA_ASSERT(allocator && allocation);
    16233 
    16234  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16235 
    16236 #if VMA_RECORDING_ENABLED
    16237  if(allocator->GetRecorder() != VMA_NULL)
    16238  {
    16239  allocator->GetRecorder()->RecordTouchAllocation(
    16240  allocator->GetCurrentFrameIndex(),
    16241  allocation);
    16242  }
    16243 #endif
    16244 
    16245  return allocator->TouchAllocation(allocation);
    16246 }
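
For allocations created with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT, a per-frame validity check built on the function above might look like this sketch (`recreateResource` is a hypothetical application callback, not part of VMA):

    if(vmaTouchAllocation(allocator, allocation) == VK_FALSE)
    {
        // Lost: the memory may already be reused by another allocation.
        vmaFreeMemory(allocator, allocation);
        recreateResource(); // hypothetical application-side recovery
    }
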
    16247 
     16248 void vmaSetAllocationUserData(
     16249  VmaAllocator allocator,
    16250  VmaAllocation allocation,
    16251  void* pUserData)
    16252 {
    16253  VMA_ASSERT(allocator && allocation);
    16254 
    16255  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16256 
    16257  allocation->SetUserData(allocator, pUserData);
    16258 
    16259 #if VMA_RECORDING_ENABLED
    16260  if(allocator->GetRecorder() != VMA_NULL)
    16261  {
    16262  allocator->GetRecorder()->RecordSetAllocationUserData(
    16263  allocator->GetCurrentFrameIndex(),
    16264  allocation,
    16265  pUserData);
    16266  }
    16267 #endif
    16268 }
    16269 
     16270 void vmaCreateLostAllocation(
     16271  VmaAllocator allocator,
    16272  VmaAllocation* pAllocation)
    16273 {
    16274  VMA_ASSERT(allocator && pAllocation);
    16275 
    16276  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
    16277 
    16278  allocator->CreateLostAllocation(pAllocation);
    16279 
    16280 #if VMA_RECORDING_ENABLED
    16281  if(allocator->GetRecorder() != VMA_NULL)
    16282  {
    16283  allocator->GetRecorder()->RecordCreateLostAllocation(
    16284  allocator->GetCurrentFrameIndex(),
    16285  *pAllocation);
    16286  }
    16287 #endif
    16288 }
    16289 
    16290 VkResult vmaMapMemory(
    16291  VmaAllocator allocator,
    16292  VmaAllocation allocation,
    16293  void** ppData)
    16294 {
    16295  VMA_ASSERT(allocator && allocation && ppData);
    16296 
    16297  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16298 
    16299  VkResult res = allocator->Map(allocation, ppData);
    16300 
    16301 #if VMA_RECORDING_ENABLED
    16302  if(allocator->GetRecorder() != VMA_NULL)
    16303  {
    16304  allocator->GetRecorder()->RecordMapMemory(
    16305  allocator->GetCurrentFrameIndex(),
    16306  allocation);
    16307  }
    16308 #endif
    16309 
    16310  return res;
    16311 }
    16312 
    16313 void vmaUnmapMemory(
    16314  VmaAllocator allocator,
    16315  VmaAllocation allocation)
    16316 {
    16317  VMA_ASSERT(allocator && allocation);
    16318 
    16319  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16320 
    16321 #if VMA_RECORDING_ENABLED
    16322  if(allocator->GetRecorder() != VMA_NULL)
    16323  {
    16324  allocator->GetRecorder()->RecordUnmapMemory(
    16325  allocator->GetCurrentFrameIndex(),
    16326  allocation);
    16327  }
    16328 #endif
    16329 
    16330  allocator->Unmap(allocation);
    16331 }
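
The map/unmap pair above supports the classic upload pattern; a sketch, assuming the allocation sits in HOST_VISIBLE memory and `srcData`/`srcSize` exist in the application:

    void* mapped = nullptr;
    if(vmaMapMemory(allocator, allocation, &mapped) == VK_SUCCESS)
    {
        memcpy(mapped, srcData, (size_t)srcSize); // assumes <cstring>
        vmaUnmapMemory(allocator, allocation);
    }
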
    16332 
    16333 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    16334 {
    16335  VMA_ASSERT(allocator && allocation);
    16336 
    16337  VMA_DEBUG_LOG("vmaFlushAllocation");
    16338 
    16339  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16340 
    16341  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
    16342 
    16343 #if VMA_RECORDING_ENABLED
    16344  if(allocator->GetRecorder() != VMA_NULL)
    16345  {
    16346  allocator->GetRecorder()->RecordFlushAllocation(
    16347  allocator->GetCurrentFrameIndex(),
    16348  allocation, offset, size);
    16349  }
    16350 #endif
    16351 }
    16352 
    16353 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    16354 {
    16355  VMA_ASSERT(allocator && allocation);
    16356 
    16357  VMA_DEBUG_LOG("vmaInvalidateAllocation");
    16358 
    16359  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16360 
    16361  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
    16362 
    16363 #if VMA_RECORDING_ENABLED
    16364  if(allocator->GetRecorder() != VMA_NULL)
    16365  {
    16366  allocator->GetRecorder()->RecordInvalidateAllocation(
    16367  allocator->GetCurrentFrameIndex(),
    16368  allocation, offset, size);
    16369  }
    16370 #endif
    16371 }
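
As FlushOrInvalidateAllocation shows, both calls silently become no-ops on HOST_COHERENT memory types, so it is safe to issue them unconditionally; a sketch for a non-coherent mapping:

    // After writing through the mapped pointer:
    vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
    // Before reading data the GPU has written:
    vmaInvalidateAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
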
    16372 
    16373 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
    16374 {
    16375  VMA_ASSERT(allocator);
    16376 
    16377  VMA_DEBUG_LOG("vmaCheckCorruption");
    16378 
    16379  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16380 
    16381  return allocator->CheckCorruption(memoryTypeBits);
    16382 }
    16383 
    16384 VkResult vmaDefragment(
    16385  VmaAllocator allocator,
    16386  VmaAllocation* pAllocations,
    16387  size_t allocationCount,
    16388  VkBool32* pAllocationsChanged,
    16389  const VmaDefragmentationInfo *pDefragmentationInfo,
    16390  VmaDefragmentationStats* pDefragmentationStats)
    16391 {
     16392  // Deprecated interface, reimplemented using the new one.
    16393 
    16394  VmaDefragmentationInfo2 info2 = {};
    16395  info2.allocationCount = (uint32_t)allocationCount;
    16396  info2.pAllocations = pAllocations;
    16397  info2.pAllocationsChanged = pAllocationsChanged;
    16398  if(pDefragmentationInfo != VMA_NULL)
    16399  {
    16400  info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
    16401  info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove;
    16402  }
    16403  else
    16404  {
    16405  info2.maxCpuAllocationsToMove = UINT32_MAX;
    16406  info2.maxCpuBytesToMove = VK_WHOLE_SIZE;
    16407  }
    16408  // info2.flags, maxGpuAllocationsToMove, maxGpuBytesToMove, commandBuffer deliberately left zero.
    16409 
     16410  VmaDefragmentationContext ctx = VK_NULL_HANDLE;
     16411  VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx);
     16412  if(res == VK_NOT_READY)
     16413  {
     16414  res = vmaDefragmentationEnd(allocator, ctx);
    16415  }
    16416  return res;
    16417 }
    16418 
    16419 VkResult vmaDefragmentationBegin(
    16420  VmaAllocator allocator,
    16421  const VmaDefragmentationInfo2* pInfo,
    16422  VmaDefragmentationStats* pStats,
    16423  VmaDefragmentationContext *pContext)
    16424 {
    16425  VMA_ASSERT(allocator && pInfo && pContext);
    16426 
    16427  // Degenerate case: Nothing to defragment.
    16428  if(pInfo->allocationCount == 0 && pInfo->poolCount == 0)
    16429  {
    16430  return VK_SUCCESS;
    16431  }
    16432 
    16433  VMA_ASSERT(pInfo->allocationCount == 0 || pInfo->pAllocations != VMA_NULL);
    16434  VMA_ASSERT(pInfo->poolCount == 0 || pInfo->pPools != VMA_NULL);
    16435  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->allocationCount, pInfo->pAllocations));
    16436  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->poolCount, pInfo->pPools));
    16437 
    16438  VMA_DEBUG_LOG("vmaDefragmentationBegin");
    16439 
    16440  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16441 
    16442  VkResult res = allocator->DefragmentationBegin(*pInfo, pStats, pContext);
    16443 
    16444 #if VMA_RECORDING_ENABLED
    16445  if(allocator->GetRecorder() != VMA_NULL)
    16446  {
    16447  allocator->GetRecorder()->RecordDefragmentationBegin(
    16448  allocator->GetCurrentFrameIndex(), *pInfo, *pContext);
    16449  }
    16450 #endif
    16451 
    16452  return res;
    16453 }
    16454 
    16455 VkResult vmaDefragmentationEnd(
    16456  VmaAllocator allocator,
    16457  VmaDefragmentationContext context)
    16458 {
    16459  VMA_ASSERT(allocator);
    16460 
    16461  VMA_DEBUG_LOG("vmaDefragmentationEnd");
    16462 
    16463  if(context != VK_NULL_HANDLE)
    16464  {
    16465  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16466 
    16467 #if VMA_RECORDING_ENABLED
    16468  if(allocator->GetRecorder() != VMA_NULL)
    16469  {
    16470  allocator->GetRecorder()->RecordDefragmentationEnd(
    16471  allocator->GetCurrentFrameIndex(), context);
    16472  }
    16473 #endif
    16474 
    16475  return allocator->DefragmentationEnd(context);
    16476  }
    16477  else
    16478  {
    16479  return VK_SUCCESS;
    16480  }
    16481 }
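
A CPU-only defragmentation sketch using the begin/end pair, mirroring what the deprecated vmaDefragment wrapper above does (`allocations`/`allocCount` are assumed to be gathered by the application):

    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = (uint32_t)allocCount; // assumed
    defragInfo.pAllocations = allocations;             // assumed
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

    VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
    VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, nullptr, &defragCtx);
    if(res == VK_NOT_READY) // otherwise Begin already finished and cleaned up
    {
        res = vmaDefragmentationEnd(allocator, defragCtx);
    }
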
    16482 
    16483 VkResult vmaBindBufferMemory(
    16484  VmaAllocator allocator,
    16485  VmaAllocation allocation,
    16486  VkBuffer buffer)
    16487 {
    16488  VMA_ASSERT(allocator && allocation && buffer);
    16489 
    16490  VMA_DEBUG_LOG("vmaBindBufferMemory");
    16491 
    16492  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16493 
    16494  return allocator->BindBufferMemory(allocation, buffer);
    16495 }
    16496 
    16497 VkResult vmaBindImageMemory(
    16498  VmaAllocator allocator,
    16499  VmaAllocation allocation,
    16500  VkImage image)
    16501 {
    16502  VMA_ASSERT(allocator && allocation && image);
    16503 
    16504  VMA_DEBUG_LOG("vmaBindImageMemory");
    16505 
    16506  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16507 
    16508  return allocator->BindImageMemory(allocation, image);
    16509 }
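
These two entry points are the manual-bind path that the new VMA_ALLOCATION_CREATE_DONT_BIND_BIT flag from this patch complements: obtain memory first, bind explicitly afterwards. A sketch around vmaAllocateMemoryForBuffer, which leaves binding to the caller:

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VmaAllocation alloc = VK_NULL_HANDLE;
    // `buf` is an already created, not yet bound VkBuffer (assumed).
    VkResult res = vmaAllocateMemoryForBuffer(allocator, buf, &allocCreateInfo, &alloc, nullptr);
    if(res == VK_SUCCESS)
    {
        res = vmaBindBufferMemory(allocator, alloc, buf); // the explicit bind step
    }
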
    16510 
    16511 VkResult vmaCreateBuffer(
    16512  VmaAllocator allocator,
    16513  const VkBufferCreateInfo* pBufferCreateInfo,
    16514  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    16515  VkBuffer* pBuffer,
    16516  VmaAllocation* pAllocation,
    16517  VmaAllocationInfo* pAllocationInfo)
    16518 {
    16519  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
    16520 
    16521  if(pBufferCreateInfo->size == 0)
    16522  {
    16523  return VK_ERROR_VALIDATION_FAILED_EXT;
    16524  }
    16525 
    16526  VMA_DEBUG_LOG("vmaCreateBuffer");
    16527 
    16528  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16529 
    16530  *pBuffer = VK_NULL_HANDLE;
    16531  *pAllocation = VK_NULL_HANDLE;
    16532 
    16533  // 1. Create VkBuffer.
    16534  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
    16535  allocator->m_hDevice,
    16536  pBufferCreateInfo,
    16537  allocator->GetAllocationCallbacks(),
    16538  pBuffer);
    16539  if(res >= 0)
    16540  {
    16541  // 2. vkGetBufferMemoryRequirements.
    16542  VkMemoryRequirements vkMemReq = {};
    16543  bool requiresDedicatedAllocation = false;
    16544  bool prefersDedicatedAllocation = false;
    16545  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
    16546  requiresDedicatedAllocation, prefersDedicatedAllocation);
    16547 
    16548  // Make sure alignment requirements for specific buffer usages reported
    16549  // in Physical Device Properties are included in alignment reported by memory requirements.
    16550  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
    16551  {
    16552  VMA_ASSERT(vkMemReq.alignment %
    16553  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
    16554  }
    16555  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
    16556  {
    16557  VMA_ASSERT(vkMemReq.alignment %
    16558  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
    16559  }
    16560  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
    16561  {
    16562  VMA_ASSERT(vkMemReq.alignment %
    16563  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
    16564  }
    16565 
    16566  // 3. Allocate memory using allocator.
    16567  res = allocator->AllocateMemory(
    16568  vkMemReq,
    16569  requiresDedicatedAllocation,
    16570  prefersDedicatedAllocation,
    16571  *pBuffer, // dedicatedBuffer
    16572  VK_NULL_HANDLE, // dedicatedImage
    16573  *pAllocationCreateInfo,
    16574  VMA_SUBALLOCATION_TYPE_BUFFER,
    16575  1, // allocationCount
    16576  pAllocation);
    16577 
    16578 #if VMA_RECORDING_ENABLED
    16579  if(allocator->GetRecorder() != VMA_NULL)
    16580  {
    16581  allocator->GetRecorder()->RecordCreateBuffer(
    16582  allocator->GetCurrentFrameIndex(),
    16583  *pBufferCreateInfo,
    16584  *pAllocationCreateInfo,
    16585  *pAllocation);
    16586  }
    16587 #endif
    16588 
    16589  if(res >= 0)
    16590  {
     16591  // 4. Bind buffer with memory.
    16592  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
    16593  if(res >= 0)
    16594  {
    16595  // All steps succeeded.
    16596  #if VMA_STATS_STRING_ENABLED
    16597  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
    16598  #endif
    16599  if(pAllocationInfo != VMA_NULL)
    16600  {
    16601  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    16602  }
    16603 
    16604  return VK_SUCCESS;
    16605  }
    16606  allocator->FreeMemory(
    16607  1, // allocationCount
    16608  pAllocation);
    16609  *pAllocation = VK_NULL_HANDLE;
    16610  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    16611  *pBuffer = VK_NULL_HANDLE;
    16612  return res;
    16613  }
    16614  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    16615  *pBuffer = VK_NULL_HANDLE;
    16616  return res;
    16617  }
    16618  return res;
    16619 }
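
For contrast with the manual path, the one-call helper above; a sketch creating a persistently mapped staging buffer (size illustrative):

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;
    bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT; // persistently mapped

    VkBuffer buf = VK_NULL_HANDLE;
    VmaAllocation alloc = VK_NULL_HANDLE;
    VmaAllocationInfo allocInfo = {};
    VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
    // On success allocInfo.pMappedData is valid for the buffer's whole size.
    vmaDestroyBuffer(allocator, buf, alloc);
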
    16620 
    16621 void vmaDestroyBuffer(
    16622  VmaAllocator allocator,
    16623  VkBuffer buffer,
    16624  VmaAllocation allocation)
    16625 {
    16626  VMA_ASSERT(allocator);
    16627 
    16628  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    16629  {
    16630  return;
    16631  }
    16632 
    16633  VMA_DEBUG_LOG("vmaDestroyBuffer");
    16634 
    16635  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16636 
    16637 #if VMA_RECORDING_ENABLED
    16638  if(allocator->GetRecorder() != VMA_NULL)
    16639  {
    16640  allocator->GetRecorder()->RecordDestroyBuffer(
    16641  allocator->GetCurrentFrameIndex(),
    16642  allocation);
    16643  }
    16644 #endif
    16645 
    16646  if(buffer != VK_NULL_HANDLE)
    16647  {
    16648  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
    16649  }
    16650 
    16651  if(allocation != VK_NULL_HANDLE)
    16652  {
    16653  allocator->FreeMemory(
    16654  1, // allocationCount
    16655  &allocation);
    16656  }
    16657 }
    16658 
    16659 VkResult vmaCreateImage(
    16660  VmaAllocator allocator,
    16661  const VkImageCreateInfo* pImageCreateInfo,
    16662  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    16663  VkImage* pImage,
    16664  VmaAllocation* pAllocation,
    16665  VmaAllocationInfo* pAllocationInfo)
    16666 {
    16667  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
    16668 
    16669  if(pImageCreateInfo->extent.width == 0 ||
    16670  pImageCreateInfo->extent.height == 0 ||
    16671  pImageCreateInfo->extent.depth == 0 ||
    16672  pImageCreateInfo->mipLevels == 0 ||
    16673  pImageCreateInfo->arrayLayers == 0)
    16674  {
    16675  return VK_ERROR_VALIDATION_FAILED_EXT;
    16676  }
    16677 
    16678  VMA_DEBUG_LOG("vmaCreateImage");
    16679 
    16680  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16681 
    16682  *pImage = VK_NULL_HANDLE;
    16683  *pAllocation = VK_NULL_HANDLE;
    16684 
    16685  // 1. Create VkImage.
    16686  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
    16687  allocator->m_hDevice,
    16688  pImageCreateInfo,
    16689  allocator->GetAllocationCallbacks(),
    16690  pImage);
    16691  if(res >= 0)
    16692  {
    16693  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
    16694  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
    16695  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
    16696 
    16697  // 2. Allocate memory using allocator.
    16698  VkMemoryRequirements vkMemReq = {};
    16699  bool requiresDedicatedAllocation = false;
    16700  bool prefersDedicatedAllocation = false;
    16701  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
    16702  requiresDedicatedAllocation, prefersDedicatedAllocation);
    16703 
    16704  res = allocator->AllocateMemory(
    16705  vkMemReq,
    16706  requiresDedicatedAllocation,
    16707  prefersDedicatedAllocation,
    16708  VK_NULL_HANDLE, // dedicatedBuffer
    16709  *pImage, // dedicatedImage
    16710  *pAllocationCreateInfo,
    16711  suballocType,
    16712  1, // allocationCount
    16713  pAllocation);
    16714 
    16715 #if VMA_RECORDING_ENABLED
    16716  if(allocator->GetRecorder() != VMA_NULL)
    16717  {
    16718  allocator->GetRecorder()->RecordCreateImage(
    16719  allocator->GetCurrentFrameIndex(),
    16720  *pImageCreateInfo,
    16721  *pAllocationCreateInfo,
    16722  *pAllocation);
    16723  }
    16724 #endif
    16725 
    16726  if(res >= 0)
    16727  {
    16728  // 3. Bind image with memory.
    16729  res = allocator->BindImageMemory(*pAllocation, *pImage);
    16730  if(res >= 0)
    16731  {
    16732  // All steps succeeded.
    16733  #if VMA_STATS_STRING_ENABLED
    16734  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
    16735  #endif
    16736  if(pAllocationInfo != VMA_NULL)
    16737  {
    16738  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    16739  }
    16740 
    16741  return VK_SUCCESS;
    16742  }
    16743  allocator->FreeMemory(
    16744  1, // allocationCount
    16745  pAllocation);
    16746  *pAllocation = VK_NULL_HANDLE;
    16747  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    16748  *pImage = VK_NULL_HANDLE;
    16749  return res;
    16750  }
    16751  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    16752  *pImage = VK_NULL_HANDLE;
    16753  return res;
    16754  }
    16755  return res;
    16756 }
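// Usage sketch for vmaCreateImage() above, for a simple 2D texture. The extent,
// format and usage flags are illustrative assumptions:
//
//   VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
//   imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
//   imgCreateInfo.extent = { 1024, 1024, 1 };
//   imgCreateInfo.mipLevels = 1;   // zero mipLevels/arrayLayers/extent is rejected above
//   imgCreateInfo.arrayLayers = 1;
//   imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
//   imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
//   imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
//   imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
//   imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
//
//   VmaAllocationCreateInfo allocCreateInfo = {};
//   allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
//
//   VkImage img;
//   VmaAllocation alloc;
//   VkResult res = vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, &img, &alloc, VMA_NULL);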
    16757 
    16758 void vmaDestroyImage(
    16759  VmaAllocator allocator,
    16760  VkImage image,
    16761  VmaAllocation allocation)
    16762 {
    16763  VMA_ASSERT(allocator);
    16764 
    16765  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    16766  {
    16767  return;
    16768  }
    16769 
    16770  VMA_DEBUG_LOG("vmaDestroyImage");
    16771 
    16772  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16773 
    16774 #if VMA_RECORDING_ENABLED
    16775  if(allocator->GetRecorder() != VMA_NULL)
    16776  {
    16777  allocator->GetRecorder()->RecordDestroyImage(
    16778  allocator->GetCurrentFrameIndex(),
    16779  allocation);
    16780  }
    16781 #endif
    16782 
    16783  if(image != VK_NULL_HANDLE)
    16784  {
    16785  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
    16786  }
    16787  if(allocation != VK_NULL_HANDLE)
    16788  {
    16789  allocator->FreeMemory(
    16790  1, // allocationCount
    16791  &allocation);
    16792  }
    16793 }
    16794 
    16795 #endif // #ifdef VMA_IMPLEMENTATION
    1 //
    2 // Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
    3 //
    4 // Permission is hereby granted, free of charge, to any person obtaining a copy
    5 // of this software and associated documentation files (the "Software"), to deal
    6 // in the Software without restriction, including without limitation the rights
    7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    8 // copies of the Software, and to permit persons to whom the Software is
    9 // furnished to do so, subject to the following conditions:
    10 //
    11 // The above copyright notice and this permission notice shall be included in
    12 // all copies or substantial portions of the Software.
    13 //
    14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    20 // THE SOFTWARE.
    21 //
    22 
    23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
    24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
    25 
    26 #ifdef __cplusplus
    27 extern "C" {
    28 #endif
    29 
    1644 /*
    1645 Define this macro to 0/1 to disable/enable support for recording functionality,
    1646 available through VmaAllocatorCreateInfo::pRecordSettings.
    1647 */
    1648 #ifndef VMA_RECORDING_ENABLED
    1649  #ifdef _WIN32
    1650  #define VMA_RECORDING_ENABLED 1
    1651  #else
    1652  #define VMA_RECORDING_ENABLED 0
    1653  #endif
    1654 #endif
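// A minimal sketch of enabling the recording feature (when VMA_RECORDING_ENABLED
// is 1) via VmaAllocatorCreateInfo::pRecordSettings; the file path and the
// physicalDevice/device handles are illustrative assumptions:
//
//   VmaRecordSettings recordSettings = {};
//   recordSettings.flags = VMA_RECORD_FLUSH_AFTER_CALL_BIT;
//   recordSettings.pFilePath = "vma_recording.csv";
//
//   VmaAllocatorCreateInfo allocatorInfo = {};
//   allocatorInfo.physicalDevice = physicalDevice;
//   allocatorInfo.device = device;
//   allocatorInfo.pRecordSettings = &recordSettings;
//
//   VmaAllocator allocator;
//   vmaCreateAllocator(&allocatorInfo, &allocator);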
    1655 
    1656 #ifndef NOMINMAX
    1657  #define NOMINMAX // For windows.h
    1658 #endif
    1659 
    1660 #ifndef VULKAN_H_
    1661  #include <vulkan/vulkan.h>
    1662 #endif
    1663 
    1664 #if VMA_RECORDING_ENABLED
    1665  #include <windows.h>
    1666 #endif
    1667 
    1668 #if !defined(VMA_DEDICATED_ALLOCATION)
    1669  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
    1670  #define VMA_DEDICATED_ALLOCATION 1
    1671  #else
    1672  #define VMA_DEDICATED_ALLOCATION 0
    1673  #endif
    1674 #endif
    1675 
    1685 VK_DEFINE_HANDLE(VmaAllocator)
    1686 
    1687 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    1689  VmaAllocator allocator,
    1690  uint32_t memoryType,
    1691  VkDeviceMemory memory,
    1692  VkDeviceSize size);
    1694 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    1695  VmaAllocator allocator,
    1696  uint32_t memoryType,
    1697  VkDeviceMemory memory,
    1698  VkDeviceSize size);
    1699 
    1713 
    1743 
    1746 typedef VkFlags VmaAllocatorCreateFlags;
    1747 
    1752 typedef struct VmaVulkanFunctions {
    1753  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    1754  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    1755  PFN_vkAllocateMemory vkAllocateMemory;
    1756  PFN_vkFreeMemory vkFreeMemory;
    1757  PFN_vkMapMemory vkMapMemory;
    1758  PFN_vkUnmapMemory vkUnmapMemory;
    1759  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
    1760  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
    1761  PFN_vkBindBufferMemory vkBindBufferMemory;
    1762  PFN_vkBindImageMemory vkBindImageMemory;
    1763  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    1764  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    1765  PFN_vkCreateBuffer vkCreateBuffer;
    1766  PFN_vkDestroyBuffer vkDestroyBuffer;
    1767  PFN_vkCreateImage vkCreateImage;
    1768  PFN_vkDestroyImage vkDestroyImage;
    1769  PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
    1770 #if VMA_DEDICATED_ALLOCATION
    1771  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    1772  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
1773 #endif
1774 } VmaVulkanFunctions;
    1775 
    1777 typedef enum VmaRecordFlagBits {
1781  VMA_RECORD_FLUSH_AFTER_CALL_BIT = 0x00000001,
1782  VMA_RECORD_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
1783 } VmaRecordFlagBits;
1784 
    1787 typedef VkFlags VmaRecordFlags;
    1788 
    1790 typedef struct VmaRecordSettings
    1791 {
1801  const char* pFilePath;
1802 } VmaRecordSettings;
    1803 
1805 typedef struct VmaAllocatorCreateInfo
1806 {
    1810 
    1811  VkPhysicalDevice physicalDevice;
    1813 
    1814  VkDevice device;
    1816 
    1819 
    1820  const VkAllocationCallbacks* pAllocationCallbacks;
    1822 
    1862  const VkDeviceSize* pHeapSizeLimit;
1867  const VmaVulkanFunctions* pVulkanFunctions;
1881  const VmaRecordSettings* pRecordSettings;
1882 } VmaAllocatorCreateInfo;
1883 
    1885 VkResult vmaCreateAllocator(
    1886  const VmaAllocatorCreateInfo* pCreateInfo,
    1887  VmaAllocator* pAllocator);
    1888 
    1890 void vmaDestroyAllocator(
    1891  VmaAllocator allocator);
    1892 
1897 void vmaGetPhysicalDeviceProperties(
1898  VmaAllocator allocator,
    1899  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
    1900 
1905 void vmaGetMemoryProperties(
1906  VmaAllocator allocator,
    1907  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
    1908 
1915 void vmaGetMemoryTypeProperties(
1916  VmaAllocator allocator,
    1917  uint32_t memoryTypeIndex,
    1918  VkMemoryPropertyFlags* pFlags);
    1919 
1928 void vmaSetCurrentFrameIndex(
1929  VmaAllocator allocator,
    1930  uint32_t frameIndex);
    1931 
    1934 typedef struct VmaStatInfo
    1935 {
    1937  uint32_t blockCount;
    1943  VkDeviceSize usedBytes;
    1945  VkDeviceSize unusedBytes;
    1948 } VmaStatInfo;
    1949 
    1951 typedef struct VmaStats
    1952 {
    1953  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    1954  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    1956 } VmaStats;
    1957 
    1959 void vmaCalculateStats(
    1960  VmaAllocator allocator,
    1961  VmaStats* pStats);
    1962 
    1963 #ifndef VMA_STATS_STRING_ENABLED
    1964 #define VMA_STATS_STRING_ENABLED 1
    1965 #endif
    1966 
    1967 #if VMA_STATS_STRING_ENABLED
    1968 
    1970 
    1972 void vmaBuildStatsString(
    1973  VmaAllocator allocator,
    1974  char** ppStatsString,
    1975  VkBool32 detailedMap);
    1976 
    1977 void vmaFreeStatsString(
    1978  VmaAllocator allocator,
    1979  char* pStatsString);
    1980 
    1981 #endif // #if VMA_STATS_STRING_ENABLED
    1982 
    1991 VK_DEFINE_HANDLE(VmaPool)
    1992 
    1993 typedef enum VmaMemoryUsage
    1994 {
    2043 } VmaMemoryUsage;
    2044 
    2054 
    2115 
    2131 
    2141 
    2148 
    2152 
2153 typedef struct VmaAllocationCreateInfo
2154 {
    2167  VkMemoryPropertyFlags requiredFlags;
    2172  VkMemoryPropertyFlags preferredFlags;
    2180  uint32_t memoryTypeBits;
2193  void* pUserData;
2194 } VmaAllocationCreateInfo;
    2195 
    2212 VkResult vmaFindMemoryTypeIndex(
    2213  VmaAllocator allocator,
    2214  uint32_t memoryTypeBits,
    2215  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2216  uint32_t* pMemoryTypeIndex);
    2217 
2230 VkResult vmaFindMemoryTypeIndexForBufferInfo(
2231  VmaAllocator allocator,
    2232  const VkBufferCreateInfo* pBufferCreateInfo,
    2233  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2234  uint32_t* pMemoryTypeIndex);
    2235 
2248 VkResult vmaFindMemoryTypeIndexForImageInfo(
2249  VmaAllocator allocator,
    2250  const VkImageCreateInfo* pImageCreateInfo,
    2251  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2252  uint32_t* pMemoryTypeIndex);
    2253 
    2274 
    2291 
    2302 
    2308 
    2311 typedef VkFlags VmaPoolCreateFlags;
    2312 
    2315 typedef struct VmaPoolCreateInfo {
    2330  VkDeviceSize blockSize;
2358 } VmaPoolCreateInfo;
2359 
    2362 typedef struct VmaPoolStats {
    2365  VkDeviceSize size;
    2368  VkDeviceSize unusedSize;
    2381  VkDeviceSize unusedRangeSizeMax;
    2384  size_t blockCount;
    2385 } VmaPoolStats;
    2386 
    2393 VkResult vmaCreatePool(
    2394  VmaAllocator allocator,
    2395  const VmaPoolCreateInfo* pCreateInfo,
    2396  VmaPool* pPool);
    2397 
    2400 void vmaDestroyPool(
    2401  VmaAllocator allocator,
    2402  VmaPool pool);
    2403 
    2410 void vmaGetPoolStats(
    2411  VmaAllocator allocator,
    2412  VmaPool pool,
    2413  VmaPoolStats* pPoolStats);
    2414 
2421 void vmaMakePoolAllocationsLost(
2422  VmaAllocator allocator,
    2423  VmaPool pool,
    2424  size_t* pLostAllocationCount);
    2425 
    2440 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
    2441 
    2466 VK_DEFINE_HANDLE(VmaAllocation)
    2467 
    2468 
    2470 typedef struct VmaAllocationInfo {
    2475  uint32_t memoryType;
    2484  VkDeviceMemory deviceMemory;
    2489  VkDeviceSize offset;
    2494  VkDeviceSize size;
2508  void* pUserData;
2509 } VmaAllocationInfo;
    2510 
    2521 VkResult vmaAllocateMemory(
    2522  VmaAllocator allocator,
    2523  const VkMemoryRequirements* pVkMemoryRequirements,
    2524  const VmaAllocationCreateInfo* pCreateInfo,
    2525  VmaAllocation* pAllocation,
    2526  VmaAllocationInfo* pAllocationInfo);
    2527 
    2547 VkResult vmaAllocateMemoryPages(
    2548  VmaAllocator allocator,
    2549  const VkMemoryRequirements* pVkMemoryRequirements,
    2550  const VmaAllocationCreateInfo* pCreateInfo,
    2551  size_t allocationCount,
    2552  VmaAllocation* pAllocations,
    2553  VmaAllocationInfo* pAllocationInfo);
    2554 
2561 VkResult vmaAllocateMemoryForBuffer(
2562  VmaAllocator allocator,
    2563  VkBuffer buffer,
    2564  const VmaAllocationCreateInfo* pCreateInfo,
    2565  VmaAllocation* pAllocation,
    2566  VmaAllocationInfo* pAllocationInfo);
    2567 
    2569 VkResult vmaAllocateMemoryForImage(
    2570  VmaAllocator allocator,
    2571  VkImage image,
    2572  const VmaAllocationCreateInfo* pCreateInfo,
    2573  VmaAllocation* pAllocation,
    2574  VmaAllocationInfo* pAllocationInfo);
    2575 
    2580 void vmaFreeMemory(
    2581  VmaAllocator allocator,
    2582  VmaAllocation allocation);
    2583 
    2594 void vmaFreeMemoryPages(
    2595  VmaAllocator allocator,
    2596  size_t allocationCount,
    2597  VmaAllocation* pAllocations);
    2598 
    2619 VkResult vmaResizeAllocation(
    2620  VmaAllocator allocator,
    2621  VmaAllocation allocation,
    2622  VkDeviceSize newSize);
    2623 
2640 void vmaGetAllocationInfo(
2641  VmaAllocator allocator,
    2642  VmaAllocation allocation,
    2643  VmaAllocationInfo* pAllocationInfo);
    2644 
    2659 VkBool32 vmaTouchAllocation(
    2660  VmaAllocator allocator,
    2661  VmaAllocation allocation);
    2662 
2676 void vmaSetAllocationUserData(
2677  VmaAllocator allocator,
    2678  VmaAllocation allocation,
    2679  void* pUserData);
    2680 
2691 void vmaCreateLostAllocation(
2692  VmaAllocator allocator,
    2693  VmaAllocation* pAllocation);
    2694 
    2729 VkResult vmaMapMemory(
    2730  VmaAllocator allocator,
    2731  VmaAllocation allocation,
    2732  void** ppData);
    2733 
    2738 void vmaUnmapMemory(
    2739  VmaAllocator allocator,
    2740  VmaAllocation allocation);
    2741 
    2754 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2755 
    2768 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2769 
    2786 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
    2787 
    2794 VK_DEFINE_HANDLE(VmaDefragmentationContext)
    2795 
2796 typedef enum VmaDefragmentationFlagBits {
2798  VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
2799 } VmaDefragmentationFlagBits;
    2800 typedef VkFlags VmaDefragmentationFlags;
    2801 
    2806 typedef struct VmaDefragmentationInfo2 {
    2830  uint32_t poolCount;
    2851  VkDeviceSize maxCpuBytesToMove;
    2861  VkDeviceSize maxGpuBytesToMove;
2875  VkCommandBuffer commandBuffer;
2876 } VmaDefragmentationInfo2;
    2877 
    2882 typedef struct VmaDefragmentationInfo {
2887  VkDeviceSize maxBytesToMove;
2892  uint32_t maxAllocationsToMove;
2893 } VmaDefragmentationInfo;
    2894 
    2896 typedef struct VmaDefragmentationStats {
    2898  VkDeviceSize bytesMoved;
2900  VkDeviceSize bytesFreed;
2902  uint32_t allocationsMoved;
2904  uint32_t deviceMemoryBlocksFreed;
2905 } VmaDefragmentationStats;
    2906 
    2933 VkResult vmaDefragmentationBegin(
    2934  VmaAllocator allocator,
    2935  const VmaDefragmentationInfo2* pInfo,
    2936  VmaDefragmentationStats* pStats,
    2937  VmaDefragmentationContext *pContext);
    2938 
    2944 VkResult vmaDefragmentationEnd(
    2945  VmaAllocator allocator,
    2946  VmaDefragmentationContext context);
    2947 
    2988 VkResult vmaDefragment(
    2989  VmaAllocator allocator,
    2990  VmaAllocation* pAllocations,
    2991  size_t allocationCount,
    2992  VkBool32* pAllocationsChanged,
    2993  const VmaDefragmentationInfo *pDefragmentationInfo,
    2994  VmaDefragmentationStats* pDefragmentationStats);
    2995 
    3008 VkResult vmaBindBufferMemory(
    3009  VmaAllocator allocator,
    3010  VmaAllocation allocation,
    3011  VkBuffer buffer);
    3012 
    3025 VkResult vmaBindImageMemory(
    3026  VmaAllocator allocator,
    3027  VmaAllocation allocation,
    3028  VkImage image);
    3029 
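// A sketch of how vmaBindBufferMemory() pairs with the new
// VMA_ALLOCATION_CREATE_DONT_BIND_BIT added in this change: the allocation is
// created unbound and bound manually afterwards. The allocator handle and the
// already-created buffer are illustrative assumptions:
//
//   VmaAllocationCreateInfo allocCreateInfo = {};
//   allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
//   allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DONT_BIND_BIT;
//
//   VkBuffer buf; // assumed already created with vkCreateBuffer()
//   VmaAllocation alloc;
//   vmaAllocateMemoryForBuffer(allocator, buf, &allocCreateInfo, &alloc, VMA_NULL);
//   // ... inspect the allocation here if needed, then bind explicitly:
//   vmaBindBufferMemory(allocator, alloc, buf);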
    3056 VkResult vmaCreateBuffer(
    3057  VmaAllocator allocator,
    3058  const VkBufferCreateInfo* pBufferCreateInfo,
    3059  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    3060  VkBuffer* pBuffer,
    3061  VmaAllocation* pAllocation,
    3062  VmaAllocationInfo* pAllocationInfo);
    3063 
    3075 void vmaDestroyBuffer(
    3076  VmaAllocator allocator,
    3077  VkBuffer buffer,
    3078  VmaAllocation allocation);
    3079 
    3081 VkResult vmaCreateImage(
    3082  VmaAllocator allocator,
    3083  const VkImageCreateInfo* pImageCreateInfo,
    3084  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    3085  VkImage* pImage,
    3086  VmaAllocation* pAllocation,
    3087  VmaAllocationInfo* pAllocationInfo);
    3088 
    3100 void vmaDestroyImage(
    3101  VmaAllocator allocator,
    3102  VkImage image,
    3103  VmaAllocation allocation);
    3104 
    3105 #ifdef __cplusplus
    3106 }
    3107 #endif
    3108 
    3109 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
    3110 
    3111 // For Visual Studio IntelliSense.
    3112 #if defined(__cplusplus) && defined(__INTELLISENSE__)
    3113 #define VMA_IMPLEMENTATION
    3114 #endif
    3115 
    3116 #ifdef VMA_IMPLEMENTATION
    3117 #undef VMA_IMPLEMENTATION
    3118 
    3119 #include <cstdint>
    3120 #include <cstdlib>
    3121 #include <cstring>
    3122 
    3123 /*******************************************************************************
    3124 CONFIGURATION SECTION
    3125 
    3126 Define some of these macros before each #include of this header or change them
3127 here if you need other than the default behavior, depending on your environment.
    3128 */
    3129 
    3130 /*
    3131 Define this macro to 1 to make the library fetch pointers to Vulkan functions
    3132 internally, like:
    3133 
    3134  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    3135 
3136 Define it to 0 if you are going to provide your own pointers to Vulkan functions via
    3137 VmaAllocatorCreateInfo::pVulkanFunctions.
    3138 */
    3139 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
    3140 #define VMA_STATIC_VULKAN_FUNCTIONS 1
    3141 #endif
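// When VMA_STATIC_VULKAN_FUNCTIONS is 0, a sketch of providing your own
// pointers (e.g. fetched through vkGetDeviceProcAddr or a loader like volk);
// only a few members are shown, the full VmaVulkanFunctions struct is declared
// above:
//
//   VmaVulkanFunctions vulkanFunctions = {};
//   vulkanFunctions.vkGetPhysicalDeviceProperties = vkGetPhysicalDeviceProperties;
//   vulkanFunctions.vkAllocateMemory = vkAllocateMemory;
//   vulkanFunctions.vkFreeMemory = vkFreeMemory;
//   // ... fill in the remaining members the same way ...
//
//   VmaAllocatorCreateInfo allocatorInfo = {};
//   allocatorInfo.pVulkanFunctions = &vulkanFunctions;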
    3142 
    3143 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
    3144 //#define VMA_USE_STL_CONTAINERS 1
    3145 
3146 /* Set this macro to 1 to make the library include and use STL containers:
3147 std::pair, std::vector, std::list, std::unordered_map.
3148 
3149 Set it to 0 or leave it undefined to make the library use its own implementations
3150 of these containers.
    3151 */
    3152 #if VMA_USE_STL_CONTAINERS
    3153  #define VMA_USE_STL_VECTOR 1
    3154  #define VMA_USE_STL_UNORDERED_MAP 1
    3155  #define VMA_USE_STL_LIST 1
    3156 #endif
    3157 
    3158 #ifndef VMA_USE_STL_SHARED_MUTEX
    3159  // Compiler conforms to C++17.
    3160  #if __cplusplus >= 201703L
    3161  #define VMA_USE_STL_SHARED_MUTEX 1
3162  // Visual Studio defines __cplusplus properly only when passed the additional parameter /Zc:__cplusplus.
3163  // Otherwise it's always 199711L, even though shared_mutex has worked since Visual Studio 2015 Update 2.
    3164  // See: https://blogs.msdn.microsoft.com/vcblog/2018/04/09/msvc-now-correctly-reports-__cplusplus/
    3165  #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
    3166  #define VMA_USE_STL_SHARED_MUTEX 1
    3167  #else
    3168  #define VMA_USE_STL_SHARED_MUTEX 0
    3169  #endif
    3170 #endif
    3171 
    3172 #if VMA_USE_STL_VECTOR
    3173  #include <vector>
    3174 #endif
    3175 
    3176 #if VMA_USE_STL_UNORDERED_MAP
    3177  #include <unordered_map>
    3178 #endif
    3179 
    3180 #if VMA_USE_STL_LIST
    3181  #include <list>
    3182 #endif
    3183 
    3184 /*
    3185 Following headers are used in this CONFIGURATION section only, so feel free to
    3186 remove them if not needed.
    3187 */
    3188 #include <cassert> // for assert
    3189 #include <algorithm> // for min, max
    3190 #include <mutex>
    3191 #include <atomic> // for std::atomic
    3192 
    3193 #ifndef VMA_NULL
    3194  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    3195  #define VMA_NULL nullptr
    3196 #endif
    3197 
    3198 #if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
    3199 #include <cstdlib>
    3200 void *aligned_alloc(size_t alignment, size_t size)
    3201 {
    3202  // alignment must be >= sizeof(void*)
    3203  if(alignment < sizeof(void*))
    3204  {
    3205  alignment = sizeof(void*);
    3206  }
    3207 
    3208  return memalign(alignment, size);
    3209 }
    3210 #elif defined(__APPLE__) || defined(__ANDROID__)
    3211 #include <cstdlib>
    3212 void *aligned_alloc(size_t alignment, size_t size)
    3213 {
    3214  // alignment must be >= sizeof(void*)
    3215  if(alignment < sizeof(void*))
    3216  {
    3217  alignment = sizeof(void*);
    3218  }
    3219 
    3220  void *pointer;
    3221  if(posix_memalign(&pointer, alignment, size) == 0)
    3222  return pointer;
    3223  return VMA_NULL;
    3224 }
    3225 #endif
    3226 
3227 // If your compiler is not compatible with C++11 and the definition of the
3228 // aligned_alloc() function is missing, uncommenting the following line may help:
    3229 
    3230 //#include <malloc.h>
    3231 
    3232 // Normal assert to check for programmer's errors, especially in Debug configuration.
    3233 #ifndef VMA_ASSERT
    3234  #ifdef _DEBUG
    3235  #define VMA_ASSERT(expr) assert(expr)
    3236  #else
    3237  #define VMA_ASSERT(expr)
    3238  #endif
    3239 #endif
    3240 
3241 // Assert that will be called very often, e.g. inside data structures like operator[].
3242 // Making it non-empty can make the program slow.
    3243 #ifndef VMA_HEAVY_ASSERT
    3244  #ifdef _DEBUG
    3245  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    3246  #else
    3247  #define VMA_HEAVY_ASSERT(expr)
    3248  #endif
    3249 #endif
    3250 
    3251 #ifndef VMA_ALIGN_OF
    3252  #define VMA_ALIGN_OF(type) (__alignof(type))
    3253 #endif
    3254 
    3255 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
    3256  #if defined(_WIN32)
    3257  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    3258  #else
    3259  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
    3260  #endif
    3261 #endif
    3262 
    3263 #ifndef VMA_SYSTEM_FREE
    3264  #if defined(_WIN32)
    3265  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    3266  #else
    3267  #define VMA_SYSTEM_FREE(ptr) free(ptr)
    3268  #endif
    3269 #endif
    3270 
    3271 #ifndef VMA_MIN
    3272  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
    3273 #endif
    3274 
    3275 #ifndef VMA_MAX
    3276  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
    3277 #endif
    3278 
    3279 #ifndef VMA_SWAP
    3280  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
    3281 #endif
    3282 
    3283 #ifndef VMA_SORT
    3284  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
    3285 #endif
    3286 
    3287 #ifndef VMA_DEBUG_LOG
    3288  #define VMA_DEBUG_LOG(format, ...)
    3289  /*
    3290  #define VMA_DEBUG_LOG(format, ...) do { \
    3291  printf(format, __VA_ARGS__); \
    3292  printf("\n"); \
    3293  } while(false)
    3294  */
    3295 #endif
    3296 
    3297 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
    3298 #if VMA_STATS_STRING_ENABLED
    3299  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    3300  {
    3301  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    3302  }
    3303  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    3304  {
    3305  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    3306  }
    3307  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    3308  {
    3309  snprintf(outStr, strLen, "%p", ptr);
    3310  }
    3311 #endif
    3312 
    3313 #ifndef VMA_MUTEX
    3314  class VmaMutex
    3315  {
    3316  public:
    3317  void Lock() { m_Mutex.lock(); }
    3318  void Unlock() { m_Mutex.unlock(); }
    3319  private:
    3320  std::mutex m_Mutex;
    3321  };
    3322  #define VMA_MUTEX VmaMutex
    3323 #endif
    3324 
    3325 // Read-write mutex, where "read" is shared access, "write" is exclusive access.
    3326 #ifndef VMA_RW_MUTEX
    3327  #if VMA_USE_STL_SHARED_MUTEX
    3328  // Use std::shared_mutex from C++17.
    3329  #include <shared_mutex>
    3330  class VmaRWMutex
    3331  {
    3332  public:
    3333  void LockRead() { m_Mutex.lock_shared(); }
    3334  void UnlockRead() { m_Mutex.unlock_shared(); }
    3335  void LockWrite() { m_Mutex.lock(); }
    3336  void UnlockWrite() { m_Mutex.unlock(); }
    3337  private:
    3338  std::shared_mutex m_Mutex;
    3339  };
    3340  #define VMA_RW_MUTEX VmaRWMutex
    3341  #elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600
    3342  // Use SRWLOCK from WinAPI.
    3343  // Minimum supported client = Windows Vista, server = Windows Server 2008.
    3344  class VmaRWMutex
    3345  {
    3346  public:
    3347  VmaRWMutex() { InitializeSRWLock(&m_Lock); }
    3348  void LockRead() { AcquireSRWLockShared(&m_Lock); }
    3349  void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
    3350  void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
    3351  void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
    3352  private:
    3353  SRWLOCK m_Lock;
    3354  };
    3355  #define VMA_RW_MUTEX VmaRWMutex
    3356  #else
    3357  // Less efficient fallback: Use normal mutex.
    3358  class VmaRWMutex
    3359  {
    3360  public:
    3361  void LockRead() { m_Mutex.Lock(); }
    3362  void UnlockRead() { m_Mutex.Unlock(); }
    3363  void LockWrite() { m_Mutex.Lock(); }
    3364  void UnlockWrite() { m_Mutex.Unlock(); }
    3365  private:
    3366  VMA_MUTEX m_Mutex;
    3367  };
    3368  #define VMA_RW_MUTEX VmaRWMutex
    3369  #endif // #if VMA_USE_STL_SHARED_MUTEX
    3370 #endif // #ifndef VMA_RW_MUTEX
    3371 
    3372 /*
    3373 If providing your own implementation, you need to implement a subset of std::atomic:
    3374 
    3375 - Constructor(uint32_t desired)
    3376 - uint32_t load() const
    3377 - void store(uint32_t desired)
    3378 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
    3379 */
    3380 #ifndef VMA_ATOMIC_UINT32
    3381  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
    3382 #endif
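// A minimal custom replacement satisfying the subset listed above, sketched
// here on top of std::atomic purely for illustration (the name MyAtomicUint32
// is an assumption):
//
//   class MyAtomicUint32
//   {
//   public:
//       MyAtomicUint32(uint32_t desired) : m_Value(desired) { }
//       uint32_t load() const { return m_Value.load(); }
//       void store(uint32_t desired) { m_Value.store(desired); }
//       bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
//           { return m_Value.compare_exchange_weak(expected, desired); }
//   private:
//       std::atomic<uint32_t> m_Value;
//   };
//   // #define VMA_ATOMIC_UINT32 MyAtomicUint32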
    3383 
    3384 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    3385 
    3389  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
    3390 #endif
    3391 
    3392 #ifndef VMA_DEBUG_ALIGNMENT
    3393 
    3397  #define VMA_DEBUG_ALIGNMENT (1)
    3398 #endif
    3399 
    3400 #ifndef VMA_DEBUG_MARGIN
    3401 
    3405  #define VMA_DEBUG_MARGIN (0)
    3406 #endif
    3407 
    3408 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
    3409 
    3413  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
    3414 #endif
    3415 
    3416 #ifndef VMA_DEBUG_DETECT_CORRUPTION
    3417 
    3422  #define VMA_DEBUG_DETECT_CORRUPTION (0)
    3423 #endif
    3424 
    3425 #ifndef VMA_DEBUG_GLOBAL_MUTEX
    3426 
    3430  #define VMA_DEBUG_GLOBAL_MUTEX (0)
    3431 #endif
    3432 
    3433 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    3434 
    3438  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
    3439 #endif
    3440 
    3441 #ifndef VMA_SMALL_HEAP_MAX_SIZE
    3442  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
    3444 #endif
    3445 
    3446 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    3447  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
    3449 #endif
    3450 
    3451 #ifndef VMA_CLASS_NO_COPY
    3452  #define VMA_CLASS_NO_COPY(className) \
    3453  private: \
    3454  className(const className&) = delete; \
    3455  className& operator=(const className&) = delete;
    3456 #endif
    3457 
    3458 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
    3459 
    3460 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
    3461 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
    3462 
    3463 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
    3464 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
    3465 
    3466 /*******************************************************************************
    3467 END OF CONFIGURATION
    3468 */
    3469 
    3470 static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;
    3471 
    3472 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    3473  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
    3474 
    3475 // Returns number of bits set to 1 in (v).
    3476 static inline uint32_t VmaCountBitsSet(uint32_t v)
    3477 {
    3478  uint32_t c = v - ((v >> 1) & 0x55555555);
    3479  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    3480  c = ((c >> 4) + c) & 0x0F0F0F0F;
    3481  c = ((c >> 8) + c) & 0x00FF00FF;
    3482  c = ((c >> 16) + c) & 0x0000FFFF;
    3483  return c;
    3484 }
    3485 
3486 // Aligns given value up to the nearest multiple of align. For example: VmaAlignUp(11, 8) = 16.
    3487 // Use types like uint32_t, uint64_t as T.
    3488 template <typename T>
    3489 static inline T VmaAlignUp(T val, T align)
    3490 {
    3491  return (val + align - 1) / align * align;
    3492 }
3493 // Aligns given value down to the nearest multiple of align. For example: VmaAlignDown(11, 8) = 8.
    3494 // Use types like uint32_t, uint64_t as T.
    3495 template <typename T>
    3496 static inline T VmaAlignDown(T val, T align)
    3497 {
    3498  return val / align * align;
    3499 }
    3500 
3501 // Division with mathematical rounding to the nearest integer.
    3502 template <typename T>
    3503 static inline T VmaRoundDiv(T x, T y)
    3504 {
    3505  return (x + (y / (T)2)) / y;
    3506 }
    3507 
    3508 /*
3509 Returns true if the given number is a power of two.
3510 T must be an unsigned integer type, or a signed integer holding a nonnegative value.
3511 Returns true for 0.
    3512 */
    3513 template <typename T>
    3514 inline bool VmaIsPow2(T x)
    3515 {
    3516  return (x & (x-1)) == 0;
    3517 }
    3518 
3519 // Returns the smallest power of 2 greater than or equal to v.
    3520 static inline uint32_t VmaNextPow2(uint32_t v)
    3521 {
    3522  v--;
    3523  v |= v >> 1;
    3524  v |= v >> 2;
    3525  v |= v >> 4;
    3526  v |= v >> 8;
    3527  v |= v >> 16;
    3528  v++;
    3529  return v;
    3530 }
    3531 static inline uint64_t VmaNextPow2(uint64_t v)
    3532 {
    3533  v--;
    3534  v |= v >> 1;
    3535  v |= v >> 2;
    3536  v |= v >> 4;
    3537  v |= v >> 8;
    3538  v |= v >> 16;
    3539  v |= v >> 32;
    3540  v++;
    3541  return v;
    3542 }
    3543 
3544 // Returns the largest power of 2 less than or equal to v.
    3545 static inline uint32_t VmaPrevPow2(uint32_t v)
    3546 {
    3547  v |= v >> 1;
    3548  v |= v >> 2;
    3549  v |= v >> 4;
    3550  v |= v >> 8;
    3551  v |= v >> 16;
    3552  v = v ^ (v >> 1);
    3553  return v;
    3554 }
    3555 static inline uint64_t VmaPrevPow2(uint64_t v)
    3556 {
    3557  v |= v >> 1;
    3558  v |= v >> 2;
    3559  v |= v >> 4;
    3560  v |= v >> 8;
    3561  v |= v >> 16;
    3562  v |= v >> 32;
    3563  v = v ^ (v >> 1);
    3564  return v;
    3565 }
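// Worked examples for the power-of-2 helpers above:
//   VmaNextPow2(17u) == 32   (v = 16 after decrement, smeared to 0x1F, +1 -> 32)
//   VmaNextPow2(32u) == 32   (already a power of 2, returned unchanged)
//   VmaPrevPow2(33u) == 32   (0x21 smeared to 0x3F, then 0x3F ^ 0x1F == 0x20)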
    3566 
    3567 static inline bool VmaStrIsEmpty(const char* pStr)
    3568 {
    3569  return pStr == VMA_NULL || *pStr == '\0';
    3570 }
    3571 
    3572 #if VMA_STATS_STRING_ENABLED
    3573 
    3574 static const char* VmaAlgorithmToStr(uint32_t algorithm)
    3575 {
    3576  switch(algorithm)
    3577  {
3578  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
3579  return "Linear";
3580  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
3581  return "Buddy";
    3582  case 0:
    3583  return "Default";
    3584  default:
    3585  VMA_ASSERT(0);
    3586  return "";
    3587  }
    3588 }
    3589 
    3590 #endif // #if VMA_STATS_STRING_ENABLED
    3591 
    3592 #ifndef VMA_SORT
    3593 
    3594 template<typename Iterator, typename Compare>
    3595 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
    3596 {
    3597  Iterator centerValue = end; --centerValue;
    3598  Iterator insertIndex = beg;
    3599  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    3600  {
    3601  if(cmp(*memTypeIndex, *centerValue))
    3602  {
    3603  if(insertIndex != memTypeIndex)
    3604  {
    3605  VMA_SWAP(*memTypeIndex, *insertIndex);
    3606  }
    3607  ++insertIndex;
    3608  }
    3609  }
    3610  if(insertIndex != centerValue)
    3611  {
    3612  VMA_SWAP(*insertIndex, *centerValue);
    3613  }
    3614  return insertIndex;
    3615 }
    3616 
    3617 template<typename Iterator, typename Compare>
    3618 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
    3619 {
    3620  if(beg < end)
    3621  {
    3622  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
    3623  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
    3624  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    3625  }
    3626 }
    3627 
    3628 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
    3629 
    3630 #endif // #ifndef VMA_SORT
    3631 
    3632 /*
    3633 Returns true if two memory blocks occupy overlapping pages.
3634 ResourceA must be at a lower memory offset than ResourceB.
    3635 
    3636 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
    3637 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
    3638 */
    3639 static inline bool VmaBlocksOnSamePage(
    3640  VkDeviceSize resourceAOffset,
    3641  VkDeviceSize resourceASize,
    3642  VkDeviceSize resourceBOffset,
    3643  VkDeviceSize pageSize)
    3644 {
    3645  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    3646  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    3647  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    3648  VkDeviceSize resourceBStart = resourceBOffset;
    3649  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    3650  return resourceAEndPage == resourceBStartPage;
    3651 }
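// Worked example for VmaBlocksOnSamePage(), assuming a bufferImageGranularity
// (pageSize) of 1024:
//   resourceAOffset = 512, resourceASize = 256 -> resourceAEnd = 767,
//   resourceAEndPage = 0. With resourceBOffset = 900, resourceBStartPage = 0,
//   so the function returns true: both resources touch page 0 and their
//   suballocations may need to respect bufferImageGranularity.
//   With resourceBOffset = 1024 instead, resourceBStartPage = 1024 and the
//   function returns false.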
    3652 
    3653 enum VmaSuballocationType
    3654 {
    3655  VMA_SUBALLOCATION_TYPE_FREE = 0,
    3656  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    3657  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    3658  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    3659  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    3660  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    3661  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
    3662 };
    3663 
    3664 /*
3665 Returns true if the given suballocation types could conflict and must respect
3666 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one of them is
3667 a buffer or linear image and the other is an optimal image. If a type is
3668 unknown, behaves conservatively.
    3669 */
    3670 static inline bool VmaIsBufferImageGranularityConflict(
    3671  VmaSuballocationType suballocType1,
    3672  VmaSuballocationType suballocType2)
    3673 {
    3674  if(suballocType1 > suballocType2)
    3675  {
    3676  VMA_SWAP(suballocType1, suballocType2);
    3677  }
    3678 
    3679  switch(suballocType1)
    3680  {
    3681  case VMA_SUBALLOCATION_TYPE_FREE:
    3682  return false;
    3683  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
    3684  return true;
    3685  case VMA_SUBALLOCATION_TYPE_BUFFER:
    3686  return
    3687  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    3688  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3689  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
    3690  return
    3691  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    3692  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
    3693  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3694  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
    3695  return
    3696  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3697  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
    3698  return false;
    3699  default:
    3700  VMA_ASSERT(0);
    3701  return true;
    3702  }
    3703 }
    3704 
    3705 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
    3706 {
    3707  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
    3708  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    3709  for(size_t i = 0; i < numberCount; ++i, ++pDst)
    3710  {
    3711  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
    3712  }
    3713 }
    3714 
    3715 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
    3716 {
    3717  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
    3718  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    3719  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
    3720  {
    3721  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
    3722  {
    3723  return false;
    3724  }
    3725  }
    3726  return true;
    3727 }
    3728 
    3729 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
    3730 struct VmaMutexLock
    3731 {
    3732  VMA_CLASS_NO_COPY(VmaMutexLock)
    3733 public:
    3734  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) :
    3735  m_pMutex(useMutex ? &mutex : VMA_NULL)
    3736  { if(m_pMutex) { m_pMutex->Lock(); } }
    3737  ~VmaMutexLock()
    3738  { if(m_pMutex) { m_pMutex->Unlock(); } }
    3739 private:
    3740  VMA_MUTEX* m_pMutex;
    3741 };
    3742 
    3743 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
    3744 struct VmaMutexLockRead
    3745 {
    3746  VMA_CLASS_NO_COPY(VmaMutexLockRead)
    3747 public:
    3748  VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
    3749  m_pMutex(useMutex ? &mutex : VMA_NULL)
    3750  { if(m_pMutex) { m_pMutex->LockRead(); } }
    3751  ~VmaMutexLockRead() { if(m_pMutex) { m_pMutex->UnlockRead(); } }
    3752 private:
    3753  VMA_RW_MUTEX* m_pMutex;
    3754 };
    3755 
    3756 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
    3757 struct VmaMutexLockWrite
    3758 {
    3759  VMA_CLASS_NO_COPY(VmaMutexLockWrite)
    3760 public:
    3761  VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) :
    3762  m_pMutex(useMutex ? &mutex : VMA_NULL)
    3763  { if(m_pMutex) { m_pMutex->LockWrite(); } }
    3764  ~VmaMutexLockWrite() { if(m_pMutex) { m_pMutex->UnlockWrite(); } }
    3765 private:
    3766  VMA_RW_MUTEX* m_pMutex;
    3767 };
    3768 
    3769 #if VMA_DEBUG_GLOBAL_MUTEX
    3770  static VMA_MUTEX gDebugGlobalMutex;
    3771  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
    3772 #else
    3773  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
    3774 #endif
    3775 
    3776 // Minimum size of a free suballocation to register it in the free suballocation collection.
    3777 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
    3778 
    3779 /*
3780 Performs a binary search and returns an iterator to the first element that is
3781 greater than or equal to (key), according to comparison (cmp).
3782 
3783 Cmp should return true if its first argument is less than its second argument.
3784 
3785 The returned iterator points to the found element, if it is present in the
3786 collection, or to the place where a new element with value (key) should be inserted.
    3787 */
    3788 template <typename CmpLess, typename IterT, typename KeyT>
    3789 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
    3790 {
    3791  size_t down = 0, up = (end - beg);
    3792  while(down < up)
    3793  {
    3794  const size_t mid = (down + up) / 2;
    3795  if(cmp(*(beg+mid), key))
    3796  {
    3797  down = mid + 1;
    3798  }
    3799  else
    3800  {
    3801  up = mid;
    3802  }
    3803  }
    3804  return beg + down;
    3805 }
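// A sketch of using VmaBinaryFindFirstNotLess() as a lower_bound over a sorted
// plain array; the comparator and data are illustrative assumptions:
//
//   const uint32_t sorted[] = { 1, 3, 3, 7 };
//   struct Less { bool operator()(uint32_t a, uint32_t b) const { return a < b; } };
//   const uint32_t* it = VmaBinaryFindFirstNotLess(sorted, sorted + 4, 3u, Less());
//   // it points to the first 3 (index 1); searching for 4u would yield index 3 (the 7).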
    3806 
    3807 /*
3808 Returns true if all pointers in the array are non-null and unique.
    3809 Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
    3810 T must be pointer type, e.g. VmaAllocation, VmaPool.
    3811 */
    3812 template<typename T>
    3813 static bool VmaValidatePointerArray(uint32_t count, const T* arr)
    3814 {
    3815  for(uint32_t i = 0; i < count; ++i)
    3816  {
    3817  const T iPtr = arr[i];
    3818  if(iPtr == VMA_NULL)
    3819  {
    3820  return false;
    3821  }
    3822  for(uint32_t j = i + 1; j < count; ++j)
    3823  {
    3824  if(iPtr == arr[j])
    3825  {
    3826  return false;
    3827  }
    3828  }
    3829  }
    3830  return true;
    3831 }
    3832 
    3834 // Memory allocation
    3835 
    3836 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
    3837 {
    3838  if((pAllocationCallbacks != VMA_NULL) &&
    3839  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    3840  {
    3841  return (*pAllocationCallbacks->pfnAllocation)(
    3842  pAllocationCallbacks->pUserData,
    3843  size,
    3844  alignment,
    3845  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    3846  }
    3847  else
    3848  {
    3849  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    3850  }
    3851 }
    3852 
    3853 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
    3854 {
    3855  if((pAllocationCallbacks != VMA_NULL) &&
    3856  (pAllocationCallbacks->pfnFree != VMA_NULL))
    3857  {
    3858  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    3859  }
    3860  else
    3861  {
    3862  VMA_SYSTEM_FREE(ptr);
    3863  }
    3864 }
    3865 
    3866 template<typename T>
    3867 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
    3868 {
    3869  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
    3870 }
    3871 
    3872 template<typename T>
    3873 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
    3874 {
    3875  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
    3876 }
    3877 
    3878 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
    3879 
    3880 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
    3881 
    3882 template<typename T>
    3883 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
    3884 {
    3885  ptr->~T();
    3886  VmaFree(pAllocationCallbacks, ptr);
    3887 }
    3888 
    3889 template<typename T>
    3890 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
    3891 {
    3892  if(ptr != VMA_NULL)
    3893  {
    3894  for(size_t i = count; i--; )
    3895  {
    3896  ptr[i].~T();
    3897  }
    3898  VmaFree(pAllocationCallbacks, ptr);
    3899  }
    3900 }
    3901 
    3902 // STL-compatible allocator.
    3903 template<typename T>
    3904 class VmaStlAllocator
    3905 {
    3906 public:
    3907  const VkAllocationCallbacks* const m_pCallbacks;
    3908  typedef T value_type;
    3909 
    3910  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    3911  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
    3912 
    3913  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    3914  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
    3915 
    3916  template<typename U>
    3917  bool operator==(const VmaStlAllocator<U>& rhs) const
    3918  {
    3919  return m_pCallbacks == rhs.m_pCallbacks;
    3920  }
    3921  template<typename U>
    3922  bool operator!=(const VmaStlAllocator<U>& rhs) const
    3923  {
    3924  return m_pCallbacks != rhs.m_pCallbacks;
    3925  }
    3926 
    3927  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
    3928 };
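// VmaStlAllocator can plug into standard containers so that their storage is
// routed through the user-provided VkAllocationCallbacks; a sketch (with a
// null-callbacks fallback to the system aligned allocator):
//
//   const VkAllocationCallbacks* pCallbacks = VMA_NULL;
//   VmaStlAllocator<int> alloc(pCallbacks);
//   std::vector<int, VmaStlAllocator<int> > v(alloc);
//   v.push_back(42);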
    3929 
    3930 #if VMA_USE_STL_VECTOR
    3931 
    3932 #define VmaVector std::vector
    3933 
    3934 template<typename T, typename allocatorT>
    3935 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
    3936 {
    3937  vec.insert(vec.begin() + index, item);
    3938 }
    3939 
    3940 template<typename T, typename allocatorT>
    3941 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
    3942 {
    3943  vec.erase(vec.begin() + index);
    3944 }
    3945 
    3946 #else // #if VMA_USE_STL_VECTOR
    3947 
3948 /* Class with an interface compatible with a subset of std::vector.
3949 T must be POD, because constructors and destructors are not called and memcpy is
3950 used for these objects instead. */
    3951 template<typename T, typename AllocatorT>
    3952 class VmaVector
    3953 {
    3954 public:
    3955  typedef T value_type;
    3956 
    3957  VmaVector(const AllocatorT& allocator) :
    3958  m_Allocator(allocator),
    3959  m_pArray(VMA_NULL),
    3960  m_Count(0),
    3961  m_Capacity(0)
    3962  {
    3963  }
    3964 
    3965  VmaVector(size_t count, const AllocatorT& allocator) :
    3966  m_Allocator(allocator),
    3967  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
    3968  m_Count(count),
    3969  m_Capacity(count)
    3970  {
    3971  }
    3972 
    3973  VmaVector(const VmaVector<T, AllocatorT>& src) :
    3974  m_Allocator(src.m_Allocator),
    3975  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
    3976  m_Count(src.m_Count),
    3977  m_Capacity(src.m_Count)
    3978  {
    3979  if(m_Count != 0)
    3980  {
    3981  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
    3982  }
    3983  }
    3984 
    3985  ~VmaVector()
    3986  {
    3987  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3988  }
    3989 
    3990  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    3991  {
    3992  if(&rhs != this)
    3993  {
    3994  resize(rhs.m_Count);
    3995  if(m_Count != 0)
    3996  {
    3997  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
    3998  }
    3999  }
    4000  return *this;
    4001  }
    4002 
    4003  bool empty() const { return m_Count == 0; }
    4004  size_t size() const { return m_Count; }
    4005  T* data() { return m_pArray; }
    4006  const T* data() const { return m_pArray; }
    4007 
    4008  T& operator[](size_t index)
    4009  {
    4010  VMA_HEAVY_ASSERT(index < m_Count);
    4011  return m_pArray[index];
    4012  }
    4013  const T& operator[](size_t index) const
    4014  {
    4015  VMA_HEAVY_ASSERT(index < m_Count);
    4016  return m_pArray[index];
    4017  }
    4018 
    4019  T& front()
    4020  {
    4021  VMA_HEAVY_ASSERT(m_Count > 0);
    4022  return m_pArray[0];
    4023  }
    4024  const T& front() const
    4025  {
    4026  VMA_HEAVY_ASSERT(m_Count > 0);
    4027  return m_pArray[0];
    4028  }
    4029  T& back()
    4030  {
    4031  VMA_HEAVY_ASSERT(m_Count > 0);
    4032  return m_pArray[m_Count - 1];
    4033  }
    4034  const T& back() const
    4035  {
    4036  VMA_HEAVY_ASSERT(m_Count > 0);
    4037  return m_pArray[m_Count - 1];
    4038  }
    4039 
    4040  void reserve(size_t newCapacity, bool freeMemory = false)
    4041  {
    4042  newCapacity = VMA_MAX(newCapacity, m_Count);
    4043 
    4044  if((newCapacity < m_Capacity) && !freeMemory)
    4045  {
    4046  newCapacity = m_Capacity;
    4047  }
    4048 
    4049  if(newCapacity != m_Capacity)
    4050  {
    4051  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
    4052  if(m_Count != 0)
    4053  {
    4054  memcpy(newArray, m_pArray, m_Count * sizeof(T));
    4055  }
    4056  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    4057  m_Capacity = newCapacity;
    4058  m_pArray = newArray;
    4059  }
    4060  }
    4061 
    4062  void resize(size_t newCount, bool freeMemory = false)
    4063  {
    4064  size_t newCapacity = m_Capacity;
    4065  if(newCount > m_Capacity)
    4066  {
    4067  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
    4068  }
    4069  else if(freeMemory)
    4070  {
    4071  newCapacity = newCount;
    4072  }
    4073 
    4074  if(newCapacity != m_Capacity)
    4075  {
    4076  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
    4077  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
    4078  if(elementsToCopy != 0)
    4079  {
    4080  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
    4081  }
    4082  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    4083  m_Capacity = newCapacity;
    4084  m_pArray = newArray;
    4085  }
    4086 
    4087  m_Count = newCount;
    4088  }
    4089 
    4090  void clear(bool freeMemory = false)
    4091  {
    4092  resize(0, freeMemory);
    4093  }
    4094 
    4095  void insert(size_t index, const T& src)
    4096  {
    4097  VMA_HEAVY_ASSERT(index <= m_Count);
    4098  const size_t oldCount = size();
    4099  resize(oldCount + 1);
    4100  if(index < oldCount)
    4101  {
    4102  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
    4103  }
    4104  m_pArray[index] = src;
    4105  }
    4106 
    4107  void remove(size_t index)
    4108  {
    4109  VMA_HEAVY_ASSERT(index < m_Count);
    4110  const size_t oldCount = size();
    4111  if(index < oldCount - 1)
    4112  {
    4113  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
    4114  }
    4115  resize(oldCount - 1);
    4116  }
    4117 
    4118  void push_back(const T& src)
    4119  {
    4120  const size_t newIndex = size();
    4121  resize(newIndex + 1);
    4122  m_pArray[newIndex] = src;
    4123  }
    4124 
    4125  void pop_back()
    4126  {
    4127  VMA_HEAVY_ASSERT(m_Count > 0);
    4128  resize(size() - 1);
    4129  }
    4130 
    4131  void push_front(const T& src)
    4132  {
    4133  insert(0, src);
    4134  }
    4135 
    4136  void pop_front()
    4137  {
    4138  VMA_HEAVY_ASSERT(m_Count > 0);
    4139  remove(0);
    4140  }
    4141 
    4142  typedef T* iterator;
    4143 
    4144  iterator begin() { return m_pArray; }
    4145  iterator end() { return m_pArray + m_Count; }
    4146 
    4147 private:
    4148  AllocatorT m_Allocator;
    4149  T* m_pArray;
    4150  size_t m_Count;
    4151  size_t m_Capacity;
    4152 };
    4153 
    4154 template<typename T, typename allocatorT>
    4155 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
    4156 {
    4157  vec.insert(index, item);
    4158 }
    4159 
    4160 template<typename T, typename allocatorT>
    4161 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
    4162 {
    4163  vec.remove(index);
    4164 }
    4165 
    4166 #endif // #if VMA_USE_STL_VECTOR
    4167 
    4168 template<typename CmpLess, typename VectorT>
    4169 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
    4170 {
    4171  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    4172  vector.data(),
    4173  vector.data() + vector.size(),
    4174  value,
    4175  CmpLess()) - vector.data();
    4176  VmaVectorInsert(vector, indexToInsert, value);
    4177  return indexToInsert;
    4178 }
    4179 
    4180 template<typename CmpLess, typename VectorT>
    4181 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
    4182 {
    4183  CmpLess comparator;
    4184  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
    4185  vector.begin(),
    4186  vector.end(),
    4187  value,
    4188  comparator);
    4189  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    4190  {
    4191  size_t indexToRemove = it - vector.begin();
    4192  VmaVectorRemove(vector, indexToRemove);
    4193  return true;
    4194  }
    4195  return false;
    4196 }
    4197 
    4198 template<typename CmpLess, typename IterT, typename KeyT>
    4199 IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
    4200 {
    4201  CmpLess comparator;
    4202  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
    4203  beg, end, value, comparator);
    4204  if(it == end ||
    4205  (!comparator(*it, value) && !comparator(value, *it)))
    4206  {
    4207  return it;
    4208  }
    4209  return end;
    4210 }
    4211 
    4213 // class VmaPoolAllocator
    4214 
    4215 /*
4216 Allocator for objects of type T, using a list of arrays (pools) to speed up
4217 allocation. The number of elements that can be allocated is not bounded, because
4218 the allocator can create multiple blocks.
    4219 */
    4220 template<typename T>
    4221 class VmaPoolAllocator
    4222 {
    4223  VMA_CLASS_NO_COPY(VmaPoolAllocator)
    4224 public:
    4225  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity);
    4226  ~VmaPoolAllocator();
    4227  void Clear();
    4228  T* Alloc();
    4229  void Free(T* ptr);
    4230 
    4231 private:
    4232  union Item
    4233  {
    4234  uint32_t NextFreeIndex;
    4235  T Value;
    4236  };
    4237 
    4238  struct ItemBlock
    4239  {
    4240  Item* pItems;
    4241  uint32_t Capacity;
    4242  uint32_t FirstFreeIndex;
    4243  };
    4244 
    4245  const VkAllocationCallbacks* m_pAllocationCallbacks;
    4246  const uint32_t m_FirstBlockCapacity;
    4247  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
    4248 
    4249  ItemBlock& CreateNewBlock();
    4250 };
    4251 
    4252 template<typename T>
    4253 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity) :
    4254  m_pAllocationCallbacks(pAllocationCallbacks),
    4255  m_FirstBlockCapacity(firstBlockCapacity),
    4256  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
    4257 {
    4258  VMA_ASSERT(m_FirstBlockCapacity > 1);
    4259 }
    4260 
    4261 template<typename T>
    4262 VmaPoolAllocator<T>::~VmaPoolAllocator()
    4263 {
    4264  Clear();
    4265 }
    4266 
    4267 template<typename T>
    4268 void VmaPoolAllocator<T>::Clear()
    4269 {
    4270  for(size_t i = m_ItemBlocks.size(); i--; )
    4271  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity);
    4272  m_ItemBlocks.clear();
    4273 }
    4274 
    4275 template<typename T>
    4276 T* VmaPoolAllocator<T>::Alloc()
    4277 {
    4278  for(size_t i = m_ItemBlocks.size(); i--; )
    4279  {
    4280  ItemBlock& block = m_ItemBlocks[i];
    4281  // This block has some free items: Use first one.
    4282  if(block.FirstFreeIndex != UINT32_MAX)
    4283  {
    4284  Item* const pItem = &block.pItems[block.FirstFreeIndex];
    4285  block.FirstFreeIndex = pItem->NextFreeIndex;
    4286  return &pItem->Value;
    4287  }
    4288  }
    4289 
    4290  // No block has free item: Create new one and use it.
    4291  ItemBlock& newBlock = CreateNewBlock();
    4292  Item* const pItem = &newBlock.pItems[0];
    4293  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    4294  return &pItem->Value;
    4295 }
    4296 
    4297 template<typename T>
    4298 void VmaPoolAllocator<T>::Free(T* ptr)
    4299 {
    4300  // Search all memory blocks to find ptr.
    4301  for(size_t i = m_ItemBlocks.size(); i--; )
    4302  {
    4303  ItemBlock& block = m_ItemBlocks[i];
    4304 
    4305  // Cast ptr to the union type (memcpy avoids type-punning issues).
    4306  Item* pItemPtr;
    4307  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
    4308 
    4309  // Check if pItemPtr is in address range of this block.
    4310  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity))
    4311  {
    4312  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
    4313  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
    4314  block.FirstFreeIndex = index;
    4315  return;
    4316  }
    4317  }
    4318  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
    4319 }
    4320 
    4321 template<typename T>
    4322 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
    4323 {
    4324  const uint32_t newBlockCapacity = m_ItemBlocks.empty() ?
    4325  m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2;
    4326 
    4327  const ItemBlock newBlock = {
    4328  vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity),
    4329  newBlockCapacity,
    4330  0 };
    4331 
    4332  m_ItemBlocks.push_back(newBlock);
    4333 
    4334  // Setup singly-linked list of all free items in this block.
    4335  for(uint32_t i = 0; i < newBlockCapacity - 1; ++i)
    4336  newBlock.pItems[i].NextFreeIndex = i + 1;
    4337  newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX;
    4338  return m_ItemBlocks.back();
    4339 }
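// A minimal usage sketch of VmaPoolAllocator (not part of vk_mem_alloc.h).
// Blocks start at firstBlockCapacity items and each new block is 3/2 the size of
// the previous one; freed slots are recycled through a per-block free list.
// Note that Alloc() does not run a constructor, hence the POD requirement.
#if 0
static void PoolAllocatorExample(const VkAllocationCallbacks* pAllocationCallbacks)
{
    VmaPoolAllocator<uint64_t> pool(pAllocationCallbacks, 32); // First block: 32 items.
    uint64_t* a = pool.Alloc();
    uint64_t* b = pool.Alloc();
    *a = 1; *b = 2;
    pool.Free(a); // a's slot becomes the head of the block's free list.
    uint64_t* c = pool.Alloc(); // Reuses the slot just freed, so c == a here.
    pool.Free(b);
    pool.Free(c);
} // ~VmaPoolAllocator calls Clear(), releasing all blocks at once.
#endif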
    4340 
    4341 ////////////////////////////////////////////////////////////////////////////////
    4342 // class VmaRawList, VmaList
    4343 
    4344 #if VMA_USE_STL_LIST
    4345 
    4346 #define VmaList std::list
    4347 
    4348 #else // #if VMA_USE_STL_LIST
    4349 
    4350 template<typename T>
    4351 struct VmaListItem
    4352 {
    4353  VmaListItem* pPrev;
    4354  VmaListItem* pNext;
    4355  T Value;
    4356 };
    4357 
    4358 // Doubly linked list.
    4359 template<typename T>
    4360 class VmaRawList
    4361 {
    4362  VMA_CLASS_NO_COPY(VmaRawList)
    4363 public:
    4364  typedef VmaListItem<T> ItemType;
    4365 
    4366  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    4367  ~VmaRawList();
    4368  void Clear();
    4369 
    4370  size_t GetCount() const { return m_Count; }
    4371  bool IsEmpty() const { return m_Count == 0; }
    4372 
    4373  ItemType* Front() { return m_pFront; }
    4374  const ItemType* Front() const { return m_pFront; }
    4375  ItemType* Back() { return m_pBack; }
    4376  const ItemType* Back() const { return m_pBack; }
    4377 
    4378  ItemType* PushBack();
    4379  ItemType* PushFront();
    4380  ItemType* PushBack(const T& value);
    4381  ItemType* PushFront(const T& value);
    4382  void PopBack();
    4383  void PopFront();
    4384 
    4385  // Item can be null - it means PushBack.
    4386  ItemType* InsertBefore(ItemType* pItem);
    4387  // Item can be null - it means PushFront.
    4388  ItemType* InsertAfter(ItemType* pItem);
    4389 
    4390  ItemType* InsertBefore(ItemType* pItem, const T& value);
    4391  ItemType* InsertAfter(ItemType* pItem, const T& value);
    4392 
    4393  void Remove(ItemType* pItem);
    4394 
    4395 private:
    4396  const VkAllocationCallbacks* const m_pAllocationCallbacks;
    4397  VmaPoolAllocator<ItemType> m_ItemAllocator;
    4398  ItemType* m_pFront;
    4399  ItemType* m_pBack;
    4400  size_t m_Count;
    4401 };
    4402 
    4403 template<typename T>
    4404 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    4405  m_pAllocationCallbacks(pAllocationCallbacks),
    4406  m_ItemAllocator(pAllocationCallbacks, 128),
    4407  m_pFront(VMA_NULL),
    4408  m_pBack(VMA_NULL),
    4409  m_Count(0)
    4410 {
    4411 }
    4412 
    4413 template<typename T>
    4414 VmaRawList<T>::~VmaRawList()
    4415 {
    4416  // Intentionally not calling Clear, because that would spend unnecessary
    4417  // computation returning all items to m_ItemAllocator as free.
    4418 }
    4419 
    4420 template<typename T>
    4421 void VmaRawList<T>::Clear()
    4422 {
    4423  if(IsEmpty() == false)
    4424  {
    4425  ItemType* pItem = m_pBack;
    4426  while(pItem != VMA_NULL)
    4427  {
    4428  ItemType* const pPrevItem = pItem->pPrev;
    4429  m_ItemAllocator.Free(pItem);
    4430  pItem = pPrevItem;
    4431  }
    4432  m_pFront = VMA_NULL;
    4433  m_pBack = VMA_NULL;
    4434  m_Count = 0;
    4435  }
    4436 }
    4437 
    4438 template<typename T>
    4439 VmaListItem<T>* VmaRawList<T>::PushBack()
    4440 {
    4441  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    4442  pNewItem->pNext = VMA_NULL;
    4443  if(IsEmpty())
    4444  {
    4445  pNewItem->pPrev = VMA_NULL;
    4446  m_pFront = pNewItem;
    4447  m_pBack = pNewItem;
    4448  m_Count = 1;
    4449  }
    4450  else
    4451  {
    4452  pNewItem->pPrev = m_pBack;
    4453  m_pBack->pNext = pNewItem;
    4454  m_pBack = pNewItem;
    4455  ++m_Count;
    4456  }
    4457  return pNewItem;
    4458 }
    4459 
    4460 template<typename T>
    4461 VmaListItem<T>* VmaRawList<T>::PushFront()
    4462 {
    4463  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    4464  pNewItem->pPrev = VMA_NULL;
    4465  if(IsEmpty())
    4466  {
    4467  pNewItem->pNext = VMA_NULL;
    4468  m_pFront = pNewItem;
    4469  m_pBack = pNewItem;
    4470  m_Count = 1;
    4471  }
    4472  else
    4473  {
    4474  pNewItem->pNext = m_pFront;
    4475  m_pFront->pPrev = pNewItem;
    4476  m_pFront = pNewItem;
    4477  ++m_Count;
    4478  }
    4479  return pNewItem;
    4480 }
    4481 
    4482 template<typename T>
    4483 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
    4484 {
    4485  ItemType* const pNewItem = PushBack();
    4486  pNewItem->Value = value;
    4487  return pNewItem;
    4488 }
    4489 
    4490 template<typename T>
    4491 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
    4492 {
    4493  ItemType* const pNewItem = PushFront();
    4494  pNewItem->Value = value;
    4495  return pNewItem;
    4496 }
    4497 
    4498 template<typename T>
    4499 void VmaRawList<T>::PopBack()
    4500 {
    4501  VMA_HEAVY_ASSERT(m_Count > 0);
    4502  ItemType* const pBackItem = m_pBack;
    4503  ItemType* const pPrevItem = pBackItem->pPrev;
    4504  if(pPrevItem != VMA_NULL)
    4505  {
    4506  pPrevItem->pNext = VMA_NULL;
    4507  }
    4508  m_pBack = pPrevItem;
    4509  m_ItemAllocator.Free(pBackItem);
    4510  --m_Count;
    4511 }
    4512 
    4513 template<typename T>
    4514 void VmaRawList<T>::PopFront()
    4515 {
    4516  VMA_HEAVY_ASSERT(m_Count > 0);
    4517  ItemType* const pFrontItem = m_pFront;
    4518  ItemType* const pNextItem = pFrontItem->pNext;
    4519  if(pNextItem != VMA_NULL)
    4520  {
    4521  pNextItem->pPrev = VMA_NULL;
    4522  }
    4523  m_pFront = pNextItem;
    4524  m_ItemAllocator.Free(pFrontItem);
    4525  --m_Count;
    4526 }
    4527 
    4528 template<typename T>
    4529 void VmaRawList<T>::Remove(ItemType* pItem)
    4530 {
    4531  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    4532  VMA_HEAVY_ASSERT(m_Count > 0);
    4533 
    4534  if(pItem->pPrev != VMA_NULL)
    4535  {
    4536  pItem->pPrev->pNext = pItem->pNext;
    4537  }
    4538  else
    4539  {
    4540  VMA_HEAVY_ASSERT(m_pFront == pItem);
    4541  m_pFront = pItem->pNext;
    4542  }
    4543 
    4544  if(pItem->pNext != VMA_NULL)
    4545  {
    4546  pItem->pNext->pPrev = pItem->pPrev;
    4547  }
    4548  else
    4549  {
    4550  VMA_HEAVY_ASSERT(m_pBack == pItem);
    4551  m_pBack = pItem->pPrev;
    4552  }
    4553 
    4554  m_ItemAllocator.Free(pItem);
    4555  --m_Count;
    4556 }
    4557 
    4558 template<typename T>
    4559 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
    4560 {
    4561  if(pItem != VMA_NULL)
    4562  {
    4563  ItemType* const prevItem = pItem->pPrev;
    4564  ItemType* const newItem = m_ItemAllocator.Alloc();
    4565  newItem->pPrev = prevItem;
    4566  newItem->pNext = pItem;
    4567  pItem->pPrev = newItem;
    4568  if(prevItem != VMA_NULL)
    4569  {
    4570  prevItem->pNext = newItem;
    4571  }
    4572  else
    4573  {
    4574  VMA_HEAVY_ASSERT(m_pFront == pItem);
    4575  m_pFront = newItem;
    4576  }
    4577  ++m_Count;
    4578  return newItem;
    4579  }
    4580  else
    4581  return PushBack();
    4582 }
    4583 
    4584 template<typename T>
    4585 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
    4586 {
    4587  if(pItem != VMA_NULL)
    4588  {
    4589  ItemType* const nextItem = pItem->pNext;
    4590  ItemType* const newItem = m_ItemAllocator.Alloc();
    4591  newItem->pNext = nextItem;
    4592  newItem->pPrev = pItem;
    4593  pItem->pNext = newItem;
    4594  if(nextItem != VMA_NULL)
    4595  {
    4596  nextItem->pPrev = newItem;
    4597  }
    4598  else
    4599  {
    4600  VMA_HEAVY_ASSERT(m_pBack == pItem);
    4601  m_pBack = newItem;
    4602  }
    4603  ++m_Count;
    4604  return newItem;
    4605  }
    4606  else
    4607  return PushFront();
    4608 }
    4609 
    4610 template<typename T>
    4611 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
    4612 {
    4613  ItemType* const newItem = InsertBefore(pItem);
    4614  newItem->Value = value;
    4615  return newItem;
    4616 }
    4617 
    4618 template<typename T>
    4619 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
    4620 {
    4621  ItemType* const newItem = InsertAfter(pItem);
    4622  newItem->Value = value;
    4623  return newItem;
    4624 }
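// A minimal usage sketch of VmaRawList (not part of vk_mem_alloc.h), traversed
// directly through its doubly-linked item pointers.
#if 0
static void RawListExample(const VkAllocationCallbacks* pAllocationCallbacks)
{
    VmaRawList<int> list(pAllocationCallbacks);
    list.PushBack(1);
    list.PushBack(3);
    list.InsertAfter(list.Front(), 2); // List is now 1, 2, 3.
    int sum = 0;
    for(VmaListItem<int>* pItem = list.Front(); pItem != VMA_NULL; pItem = pItem->pNext)
    {
        sum += pItem->Value; // sum ends up as 6.
    }
    list.PopFront(); // Removes 1.
    list.Clear();    // Frees the remaining items.
}
#endif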
    4625 
    4626 template<typename T, typename AllocatorT>
    4627 class VmaList
    4628 {
    4629  VMA_CLASS_NO_COPY(VmaList)
    4630 public:
    4631  class iterator
    4632  {
    4633  public:
    4634  iterator() :
    4635  m_pList(VMA_NULL),
    4636  m_pItem(VMA_NULL)
    4637  {
    4638  }
    4639 
    4640  T& operator*() const
    4641  {
    4642  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    4643  return m_pItem->Value;
    4644  }
    4645  T* operator->() const
    4646  {
    4647  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    4648  return &m_pItem->Value;
    4649  }
    4650 
    4651  iterator& operator++()
    4652  {
    4653  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    4654  m_pItem = m_pItem->pNext;
    4655  return *this;
    4656  }
    4657  iterator& operator--()
    4658  {
    4659  if(m_pItem != VMA_NULL)
    4660  {
    4661  m_pItem = m_pItem->pPrev;
    4662  }
    4663  else
    4664  {
    4665  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
    4666  m_pItem = m_pList->Back();
    4667  }
    4668  return *this;
    4669  }
    4670 
    4671  iterator operator++(int)
    4672  {
    4673  iterator result = *this;
    4674  ++*this;
    4675  return result;
    4676  }
    4677  iterator operator--(int)
    4678  {
    4679  iterator result = *this;
    4680  --*this;
    4681  return result;
    4682  }
    4683 
    4684  bool operator==(const iterator& rhs) const
    4685  {
    4686  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    4687  return m_pItem == rhs.m_pItem;
    4688  }
    4689  bool operator!=(const iterator& rhs) const
    4690  {
    4691  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    4692  return m_pItem != rhs.m_pItem;
    4693  }
    4694 
    4695  private:
    4696  VmaRawList<T>* m_pList;
    4697  VmaListItem<T>* m_pItem;
    4698 
    4699  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
    4700  m_pList(pList),
    4701  m_pItem(pItem)
    4702  {
    4703  }
    4704 
    4705  friend class VmaList<T, AllocatorT>;
    4706  };
    4707 
    4708  class const_iterator
    4709  {
    4710  public:
    4711  const_iterator() :
    4712  m_pList(VMA_NULL),
    4713  m_pItem(VMA_NULL)
    4714  {
    4715  }
    4716 
    4717  const_iterator(const iterator& src) :
    4718  m_pList(src.m_pList),
    4719  m_pItem(src.m_pItem)
    4720  {
    4721  }
    4722 
    4723  const T& operator*() const
    4724  {
    4725  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    4726  return m_pItem->Value;
    4727  }
    4728  const T* operator->() const
    4729  {
    4730  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    4731  return &m_pItem->Value;
    4732  }
    4733 
    4734  const_iterator& operator++()
    4735  {
    4736  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    4737  m_pItem = m_pItem->pNext;
    4738  return *this;
    4739  }
    4740  const_iterator& operator--()
    4741  {
    4742  if(m_pItem != VMA_NULL)
    4743  {
    4744  m_pItem = m_pItem->pPrev;
    4745  }
    4746  else
    4747  {
    4748  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
    4749  m_pItem = m_pList->Back();
    4750  }
    4751  return *this;
    4752  }
    4753 
    4754  const_iterator operator++(int)
    4755  {
    4756  const_iterator result = *this;
    4757  ++*this;
    4758  return result;
    4759  }
    4760  const_iterator operator--(int)
    4761  {
    4762  const_iterator result = *this;
    4763  --*this;
    4764  return result;
    4765  }
    4766 
    4767  bool operator==(const const_iterator& rhs) const
    4768  {
    4769  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    4770  return m_pItem == rhs.m_pItem;
    4771  }
    4772  bool operator!=(const const_iterator& rhs) const
    4773  {
    4774  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    4775  return m_pItem != rhs.m_pItem;
    4776  }
    4777 
    4778  private:
    4779  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
    4780  m_pList(pList),
    4781  m_pItem(pItem)
    4782  {
    4783  }
    4784 
    4785  const VmaRawList<T>* m_pList;
    4786  const VmaListItem<T>* m_pItem;
    4787 
    4788  friend class VmaList<T, AllocatorT>;
    4789  };
    4790 
    4791  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
    4792 
    4793  bool empty() const { return m_RawList.IsEmpty(); }
    4794  size_t size() const { return m_RawList.GetCount(); }
    4795 
    4796  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    4797  iterator end() { return iterator(&m_RawList, VMA_NULL); }
    4798 
    4799  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    4800  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
    4801 
    4802  void clear() { m_RawList.Clear(); }
    4803  void push_back(const T& value) { m_RawList.PushBack(value); }
    4804  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    4805  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
    4806 
    4807 private:
    4808  VmaRawList<T> m_RawList;
    4809 };
    4810 
    4811 #endif // #if VMA_USE_STL_LIST
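// A minimal usage sketch of VmaList (not part of vk_mem_alloc.h). It mirrors a
// subset of the std::list interface, so the same code compiles whether
// VMA_USE_STL_LIST is enabled or not.
#if 0
static void ListExample(const VkAllocationCallbacks* pAllocationCallbacks)
{
    typedef VmaList< int, VmaStlAllocator<int> > IntList;
    IntList list(VmaStlAllocator<int>(pAllocationCallbacks));
    list.push_back(10);
    list.push_back(20);
    IntList::iterator it = list.insert(list.begin(), 5); // List is now 5, 10, 20.
    list.erase(it);                                      // Back to 10, 20.
    const size_t count = list.size();                    // count == 2.
    (void)count;
    list.clear();
}
#endif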
    4812 
    4813 ////////////////////////////////////////////////////////////////////////////////
    4814 // class VmaMap
    4815 
    4816 // Unused in this version.
    4817 #if 0
    4818 
    4819 #if VMA_USE_STL_UNORDERED_MAP
    4820 
    4821 #define VmaPair std::pair
    4822 
    4823 #define VMA_MAP_TYPE(KeyT, ValueT) \
    4824  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
    4825 
    4826 #else // #if VMA_USE_STL_UNORDERED_MAP
    4827 
    4828 template<typename T1, typename T2>
    4829 struct VmaPair
    4830 {
    4831  T1 first;
    4832  T2 second;
    4833 
    4834  VmaPair() : first(), second() { }
    4835  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
    4836 };
    4837 
    4838 /* Class compatible with a subset of the interface of std::unordered_map.
    4839 KeyT, ValueT must be POD because they will be stored in VmaVector.
    4840 */
    4841 template<typename KeyT, typename ValueT>
    4842 class VmaMap
    4843 {
    4844 public:
    4845  typedef VmaPair<KeyT, ValueT> PairType;
    4846  typedef PairType* iterator;
    4847 
    4848  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
    4849 
    4850  iterator begin() { return m_Vector.begin(); }
    4851  iterator end() { return m_Vector.end(); }
    4852 
    4853  void insert(const PairType& pair);
    4854  iterator find(const KeyT& key);
    4855  void erase(iterator it);
    4856 
    4857 private:
    4858  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
    4859 };
    4860 
    4861 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
    4862 
    4863 template<typename FirstT, typename SecondT>
    4864 struct VmaPairFirstLess
    4865 {
    4866  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    4867  {
    4868  return lhs.first < rhs.first;
    4869  }
    4870  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    4871  {
    4872  return lhs.first < rhsFirst;
    4873  }
    4874 };
    4875 
    4876 template<typename KeyT, typename ValueT>
    4877 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
    4878 {
    4879  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    4880  m_Vector.data(),
    4881  m_Vector.data() + m_Vector.size(),
    4882  pair,
    4883  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    4884  VmaVectorInsert(m_Vector, indexToInsert, pair);
    4885 }
    4886 
    4887 template<typename KeyT, typename ValueT>
    4888 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
    4889 {
    4890  PairType* it = VmaBinaryFindFirstNotLess(
    4891  m_Vector.data(),
    4892  m_Vector.data() + m_Vector.size(),
    4893  key,
    4894  VmaPairFirstLess<KeyT, ValueT>());
    4895  if((it != m_Vector.end()) && (it->first == key))
    4896  {
    4897  return it;
    4898  }
    4899  else
    4900  {
    4901  return m_Vector.end();
    4902  }
    4903 }
    4904 
    4905 template<typename KeyT, typename ValueT>
    4906 void VmaMap<KeyT, ValueT>::erase(iterator it)
    4907 {
    4908  VmaVectorRemove(m_Vector, it - m_Vector.begin());
    4909 }
    4910 
    4911 #endif // #if VMA_USE_STL_UNORDERED_MAP
    4912 
    4913 #endif // #if 0
    4914 
    4915 ////////////////////////////////////////////////////////////////////////////////
    4916 
    4917 class VmaDeviceMemoryBlock;
    4918 
    4919 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
    4920 
    4921 struct VmaAllocation_T
    4922 {
    4923 private:
    4924  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
    4925 
    4926  enum FLAGS
    4927  {
    4928  FLAG_USER_DATA_STRING = 0x01,
    4929  };
    4930 
    4931 public:
    4932  enum ALLOCATION_TYPE
    4933  {
    4934  ALLOCATION_TYPE_NONE,
    4935  ALLOCATION_TYPE_BLOCK,
    4936  ALLOCATION_TYPE_DEDICATED,
    4937  };
    4938 
    4939  /*
    4940  This struct cannot have a constructor or destructor. It must be POD because it is
    4941  allocated using VmaPoolAllocator.
    4942  */
    4943 
    4944  void Ctor(uint32_t currentFrameIndex, bool userDataString)
    4945  {
    4946  m_Alignment = 1;
    4947  m_Size = 0;
    4948  m_pUserData = VMA_NULL;
    4949  m_LastUseFrameIndex = currentFrameIndex;
    4950  m_Type = (uint8_t)ALLOCATION_TYPE_NONE;
    4951  m_SuballocationType = (uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN;
    4952  m_MapCount = 0;
    4953  m_Flags = userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0;
    4954 
    4955 #if VMA_STATS_STRING_ENABLED
    4956  m_CreationFrameIndex = currentFrameIndex;
    4957  m_BufferImageUsage = 0;
    4958 #endif
    4959  }
    4960 
    4961  void Dtor()
    4962  {
    4963  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
    4964 
    4965  // Check if owned string was freed.
    4966  VMA_ASSERT(m_pUserData == VMA_NULL);
    4967  }
    4968 
    4969  void InitBlockAllocation(
    4970  VmaDeviceMemoryBlock* block,
    4971  VkDeviceSize offset,
    4972  VkDeviceSize alignment,
    4973  VkDeviceSize size,
    4974  VmaSuballocationType suballocationType,
    4975  bool mapped,
    4976  bool canBecomeLost)
    4977  {
    4978  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
    4979  VMA_ASSERT(block != VMA_NULL);
    4980  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
    4981  m_Alignment = alignment;
    4982  m_Size = size;
    4983  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
    4984  m_SuballocationType = (uint8_t)suballocationType;
    4985  m_BlockAllocation.m_Block = block;
    4986  m_BlockAllocation.m_Offset = offset;
    4987  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    4988  }
    4989 
    4990  void InitLost()
    4991  {
    4992  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
    4993  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
    4994  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
    4995  m_BlockAllocation.m_Block = VMA_NULL;
    4996  m_BlockAllocation.m_Offset = 0;
    4997  m_BlockAllocation.m_CanBecomeLost = true;
    4998  }
    4999 
    5000  void ChangeBlockAllocation(
    5001  VmaAllocator hAllocator,
    5002  VmaDeviceMemoryBlock* block,
    5003  VkDeviceSize offset);
    5004 
    5005  void ChangeSize(VkDeviceSize newSize);
    5006  void ChangeOffset(VkDeviceSize newOffset);
    5007 
    5008  // A non-null pMappedData means the allocation was created with the MAPPED flag.
    5009  void InitDedicatedAllocation(
    5010  uint32_t memoryTypeIndex,
    5011  VkDeviceMemory hMemory,
    5012  VmaSuballocationType suballocationType,
    5013  void* pMappedData,
    5014  VkDeviceSize size)
    5015  {
    5016  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
    5017  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
    5018  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
    5019  m_Alignment = 0;
    5020  m_Size = size;
    5021  m_SuballocationType = (uint8_t)suballocationType;
    5022  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
    5023  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
    5024  m_DedicatedAllocation.m_hMemory = hMemory;
    5025  m_DedicatedAllocation.m_pMappedData = pMappedData;
    5026  }
    5027 
    5028  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    5029  VkDeviceSize GetAlignment() const { return m_Alignment; }
    5030  VkDeviceSize GetSize() const { return m_Size; }
    5031  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
    5032  void* GetUserData() const { return m_pUserData; }
    5033  void SetUserData(VmaAllocator hAllocator, void* pUserData);
    5034  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
    5035 
    5036  VmaDeviceMemoryBlock* GetBlock() const
    5037  {
    5038  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    5039  return m_BlockAllocation.m_Block;
    5040  }
    5041  VkDeviceSize GetOffset() const;
    5042  VkDeviceMemory GetMemory() const;
    5043  uint32_t GetMemoryTypeIndex() const;
    5044  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
    5045  void* GetMappedData() const;
    5046  bool CanBecomeLost() const;
    5047 
    5048  uint32_t GetLastUseFrameIndex() const
    5049  {
    5050  return m_LastUseFrameIndex.load();
    5051  }
    5052  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    5053  {
    5054  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    5055  }
    5056  /*
    5057  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
    5058  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    5059  - Else, returns false.
    5060 
    5061  If hAllocation is already lost, assert - you should not call it then.
    5062  If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    5063  */
    5064  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
    5065 
    5066  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    5067  {
    5068  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
    5069  outInfo.blockCount = 1;
    5070  outInfo.allocationCount = 1;
    5071  outInfo.unusedRangeCount = 0;
    5072  outInfo.usedBytes = m_Size;
    5073  outInfo.unusedBytes = 0;
    5074  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
    5075  outInfo.unusedRangeSizeMin = UINT64_MAX;
    5076  outInfo.unusedRangeSizeMax = 0;
    5077  }
    5078 
    5079  void BlockAllocMap();
    5080  void BlockAllocUnmap();
    5081  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    5082  void DedicatedAllocUnmap(VmaAllocator hAllocator);
    5083 
    5084 #if VMA_STATS_STRING_ENABLED
    5085  uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
    5086  uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
    5087 
    5088  void InitBufferImageUsage(uint32_t bufferImageUsage)
    5089  {
    5090  VMA_ASSERT(m_BufferImageUsage == 0);
    5091  m_BufferImageUsage = bufferImageUsage;
    5092  }
    5093 
    5094  void PrintParameters(class VmaJsonWriter& json) const;
    5095 #endif
    5096 
    5097 private:
    5098  VkDeviceSize m_Alignment;
    5099  VkDeviceSize m_Size;
    5100  void* m_pUserData;
    5101  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    5102  uint8_t m_Type; // ALLOCATION_TYPE
    5103  uint8_t m_SuballocationType; // VmaSuballocationType
    5104  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    5105  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
    5106  uint8_t m_MapCount;
    5107  uint8_t m_Flags; // enum FLAGS
    5108 
    5109  // Allocation out of VmaDeviceMemoryBlock.
    5110  struct BlockAllocation
    5111  {
    5112  VmaDeviceMemoryBlock* m_Block;
    5113  VkDeviceSize m_Offset;
    5114  bool m_CanBecomeLost;
    5115  };
    5116 
    5117  // Allocation for an object that has its own private VkDeviceMemory.
    5118  struct DedicatedAllocation
    5119  {
    5120  uint32_t m_MemoryTypeIndex;
    5121  VkDeviceMemory m_hMemory;
    5122  void* m_pMappedData; // Not null means memory is mapped.
    5123  };
    5124 
    5125  union
    5126  {
    5127  // Allocation out of VmaDeviceMemoryBlock.
    5128  BlockAllocation m_BlockAllocation;
    5129  // Allocation for an object that has its own private VkDeviceMemory.
    5130  DedicatedAllocation m_DedicatedAllocation;
    5131  };
    5132 
    5133 #if VMA_STATS_STRING_ENABLED
    5134  uint32_t m_CreationFrameIndex;
    5135  uint32_t m_BufferImageUsage; // 0 if unknown.
    5136 #endif
    5137 
    5138  void FreeUserDataString(VmaAllocator hAllocator);
    5139 };
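// An illustrative note on the m_MapCount encoding described above (not part of
// vk_mem_alloc.h): persistent-map state and the vmaMapMemory() reference count
// share one byte.
#if 0
static void MapCountExample()
{
    uint8_t mapCount = 0x80; // MAP_COUNT_FLAG_PERSISTENT_MAP: created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    mapCount += 2;           // Two additional vmaMapMemory() calls on the same allocation.
    const bool isPersistentMap = (mapCount & 0x80) != 0; // Still true.
    const uint8_t mapRefCount  = mapCount & 0x7F;        // == 2.
    (void)isPersistentMap; (void)mapRefCount;
}
#endif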
    5140 
    5141 /*
    5142 Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
    5143 allocated memory block or free.
    5144 */
    5145 struct VmaSuballocation
    5146 {
    5147  VkDeviceSize offset;
    5148  VkDeviceSize size;
    5149  VmaAllocation hAllocation;
    5150  VmaSuballocationType type;
    5151 };
    5152 
    5153 // Comparator for offsets.
    5154 struct VmaSuballocationOffsetLess
    5155 {
    5156  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    5157  {
    5158  return lhs.offset < rhs.offset;
    5159  }
    5160 };
    5161 struct VmaSuballocationOffsetGreater
    5162 {
    5163  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    5164  {
    5165  return lhs.offset > rhs.offset;
    5166  }
    5167 };
    5168 
    5169 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
    5170 
    5171 // Cost of making one additional allocation lost, expressed in bytes.
    5172 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
    5173 
    5174 enum class VmaAllocationRequestType
    5175 {
    5176  Normal,
    5177  // Used by "Linear" algorithm.
    5178  UpperAddress,
    5179  EndOf1st,
    5180  EndOf2nd,
    5181 };
    5182 
    5183 /*
    5184 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
    5185 
    5186 If canMakeOtherLost was false:
    5187 - item points to a FREE suballocation.
    5188 - itemsToMakeLostCount is 0.
    5189 
    5190 If canMakeOtherLost was true:
    5191 - item points to first of sequence of suballocations, which are either FREE,
    5192  or point to VmaAllocations that can become lost.
    5193 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
    5194  the requested allocation to succeed.
    5195 */
    5196 struct VmaAllocationRequest
    5197 {
    5198  VkDeviceSize offset;
    5199  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
    5200  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
    5201  VmaSuballocationList::iterator item;
    5202  size_t itemsToMakeLostCount;
    5203  void* customData;
    5204  VmaAllocationRequestType type;
    5205 
    5206  VkDeviceSize CalcCost() const
    5207  {
    5208  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    5209  }
    5210 };
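// An illustrative worked example of CalcCost() (not part of vk_mem_alloc.h):
// with sumItemSize = 3 MiB (3145728 B) and itemsToMakeLostCount = 2, the cost is
// 3145728 + 2 * 1048576 = 5242880 B. Every allocation that would be made lost
// adds a fixed VMA_LOST_ALLOCATION_COST (1 MiB) penalty on top of the bytes it
// occupies, so requests that sacrifice fewer live allocations compare as cheaper.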
    5211 
    5212 /*
    5213 Data structure used for bookkeeping of allocations and unused ranges of memory
    5214 in a single VkDeviceMemory block.
    5215 */
    5216 class VmaBlockMetadata
    5217 {
    5218 public:
    5219  VmaBlockMetadata(VmaAllocator hAllocator);
    5220  virtual ~VmaBlockMetadata() { }
    5221  virtual void Init(VkDeviceSize size) { m_Size = size; }
    5222 
    5223  // Validates all data structures inside this object. If not valid, returns false.
    5224  virtual bool Validate() const = 0;
    5225  VkDeviceSize GetSize() const { return m_Size; }
    5226  virtual size_t GetAllocationCount() const = 0;
    5227  virtual VkDeviceSize GetSumFreeSize() const = 0;
    5228  virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
    5229  // Returns true if this block is empty - contains only a single free suballocation.
    5230  virtual bool IsEmpty() const = 0;
    5231 
    5232  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
    5233  // Shouldn't modify blockCount.
    5234  virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;
    5235 
    5236 #if VMA_STATS_STRING_ENABLED
    5237  virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
    5238 #endif
    5239 
    5240  // Tries to find a place for suballocation with given parameters inside this block.
    5241  // If succeeded, fills pAllocationRequest and returns true.
    5242  // If failed, returns false.
    5243  virtual bool CreateAllocationRequest(
    5244  uint32_t currentFrameIndex,
    5245  uint32_t frameInUseCount,
    5246  VkDeviceSize bufferImageGranularity,
    5247  VkDeviceSize allocSize,
    5248  VkDeviceSize allocAlignment,
    5249  bool upperAddress,
    5250  VmaSuballocationType allocType,
    5251  bool canMakeOtherLost,
    5252  // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.
    5253  uint32_t strategy,
    5254  VmaAllocationRequest* pAllocationRequest) = 0;
    5255 
    5256  virtual bool MakeRequestedAllocationsLost(
    5257  uint32_t currentFrameIndex,
    5258  uint32_t frameInUseCount,
    5259  VmaAllocationRequest* pAllocationRequest) = 0;
    5260 
    5261  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;
    5262 
    5263  virtual VkResult CheckCorruption(const void* pBlockData) = 0;
    5264 
    5265  // Makes actual allocation based on request. Request must already be checked and valid.
    5266  virtual void Alloc(
    5267  const VmaAllocationRequest& request,
    5268  VmaSuballocationType type,
    5269  VkDeviceSize allocSize,
    5270  VmaAllocation hAllocation) = 0;
    5271 
    5272  // Frees suballocation assigned to given memory region.
    5273  virtual void Free(const VmaAllocation allocation) = 0;
    5274  virtual void FreeAtOffset(VkDeviceSize offset) = 0;
    5275 
    5276  // Tries to resize (grow or shrink) space for given allocation, in place.
    5277  virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize) { return false; }
    5278 
    5279 protected:
    5280  const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
    5281 
    5282 #if VMA_STATS_STRING_ENABLED
    5283  void PrintDetailedMap_Begin(class VmaJsonWriter& json,
    5284  VkDeviceSize unusedBytes,
    5285  size_t allocationCount,
    5286  size_t unusedRangeCount) const;
    5287  void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
    5288  VkDeviceSize offset,
    5289  VmaAllocation hAllocation) const;
    5290  void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
    5291  VkDeviceSize offset,
    5292  VkDeviceSize size) const;
    5293  void PrintDetailedMap_End(class VmaJsonWriter& json) const;
    5294 #endif
    5295 
    5296 private:
    5297  VkDeviceSize m_Size;
    5298  const VkAllocationCallbacks* m_pAllocationCallbacks;
    5299 };
    5300 
    5301 #define VMA_VALIDATE(cond) do { if(!(cond)) { \
    5302  VMA_ASSERT(0 && "Validation failed: " #cond); \
    5303  return false; \
    5304  } } while(false)
    5305 
    5306 class VmaBlockMetadata_Generic : public VmaBlockMetadata
    5307 {
    5308  VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
    5309 public:
    5310  VmaBlockMetadata_Generic(VmaAllocator hAllocator);
    5311  virtual ~VmaBlockMetadata_Generic();
    5312  virtual void Init(VkDeviceSize size);
    5313 
    5314  virtual bool Validate() const;
    5315  virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
    5316  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    5317  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    5318  virtual bool IsEmpty() const;
    5319 
    5320  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    5321  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
    5322 
    5323 #if VMA_STATS_STRING_ENABLED
    5324  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
    5325 #endif
    5326 
    5327  virtual bool CreateAllocationRequest(
    5328  uint32_t currentFrameIndex,
    5329  uint32_t frameInUseCount,
    5330  VkDeviceSize bufferImageGranularity,
    5331  VkDeviceSize allocSize,
    5332  VkDeviceSize allocAlignment,
    5333  bool upperAddress,
    5334  VmaSuballocationType allocType,
    5335  bool canMakeOtherLost,
    5336  uint32_t strategy,
    5337  VmaAllocationRequest* pAllocationRequest);
    5338 
    5339  virtual bool MakeRequestedAllocationsLost(
    5340  uint32_t currentFrameIndex,
    5341  uint32_t frameInUseCount,
    5342  VmaAllocationRequest* pAllocationRequest);
    5343 
    5344  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
    5345 
    5346  virtual VkResult CheckCorruption(const void* pBlockData);
    5347 
    5348  virtual void Alloc(
    5349  const VmaAllocationRequest& request,
    5350  VmaSuballocationType type,
    5351  VkDeviceSize allocSize,
    5352  VmaAllocation hAllocation);
    5353 
    5354  virtual void Free(const VmaAllocation allocation);
    5355  virtual void FreeAtOffset(VkDeviceSize offset);
    5356 
    5357  virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize);
    5358 
    5359  ////////////////////////////////////////////////////////////////////////////////
    5360  // For defragmentation
    5361 
    5362  bool IsBufferImageGranularityConflictPossible(
    5363  VkDeviceSize bufferImageGranularity,
    5364  VmaSuballocationType& inOutPrevSuballocType) const;
    5365 
    5366 private:
    5367  friend class VmaDefragmentationAlgorithm_Generic;
    5368  friend class VmaDefragmentationAlgorithm_Fast;
    5369 
    5370  uint32_t m_FreeCount;
    5371  VkDeviceSize m_SumFreeSize;
    5372  VmaSuballocationList m_Suballocations;
    5373  // Suballocations that are free and have size greater than certain threshold.
    5374  // Sorted by size, ascending.
    5375  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
    5376 
    5377  bool ValidateFreeSuballocationList() const;
    5378 
    5379  // Checks if a requested suballocation with given parameters can be placed at given suballocItem.
    5380  // If yes, fills pOffset and returns true. If no, returns false.
    5381  bool CheckAllocation(
    5382  uint32_t currentFrameIndex,
    5383  uint32_t frameInUseCount,
    5384  VkDeviceSize bufferImageGranularity,
    5385  VkDeviceSize allocSize,
    5386  VkDeviceSize allocAlignment,
    5387  VmaSuballocationType allocType,
    5388  VmaSuballocationList::const_iterator suballocItem,
    5389  bool canMakeOtherLost,
    5390  VkDeviceSize* pOffset,
    5391  size_t* itemsToMakeLostCount,
    5392  VkDeviceSize* pSumFreeSize,
    5393  VkDeviceSize* pSumItemSize) const;
    5394  // Given a free suballocation, merges it with the following one, which must also be free.
    5395  void MergeFreeWithNext(VmaSuballocationList::iterator item);
    5396  // Releases given suballocation, making it free.
    5397  // Merges it with adjacent free suballocations if applicable.
    5398  // Returns iterator to the new free suballocation at this place.
    5399  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    5400  // Given a free suballocation, inserts it into the sorted list
    5401  // m_FreeSuballocationsBySize if it qualifies.
    5402  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    5403  // Given a free suballocation, removes it from the sorted list
    5404  // m_FreeSuballocationsBySize if it is registered there.
    5405  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
    5406 };
    5407 
    5408 /*
    5409 Allocations and their references in internal data structure look like this:
    5410 
    5411 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
    5412 
    5413  0 +-------+
    5414  | |
    5415  | |
    5416  | |
    5417  +-------+
    5418  | Alloc | 1st[m_1stNullItemsBeginCount]
    5419  +-------+
    5420  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    5421  +-------+
    5422  | ... |
    5423  +-------+
    5424  | Alloc | 1st[1st.size() - 1]
    5425  +-------+
    5426  | |
    5427  | |
    5428  | |
    5429 GetSize() +-------+
    5430 
    5431 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
    5432 
    5433  0 +-------+
    5434  | Alloc | 2nd[0]
    5435  +-------+
    5436  | Alloc | 2nd[1]
    5437  +-------+
    5438  | ... |
    5439  +-------+
    5440  | Alloc | 2nd[2nd.size() - 1]
    5441  +-------+
    5442  | |
    5443  | |
    5444  | |
    5445  +-------+
    5446  | Alloc | 1st[m_1stNullItemsBeginCount]
    5447  +-------+
    5448  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    5449  +-------+
    5450  | ... |
    5451  +-------+
    5452  | Alloc | 1st[1st.size() - 1]
    5453  +-------+
    5454  | |
    5455 GetSize() +-------+
    5456 
    5457 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
    5458 
    5459  0 +-------+
    5460  | |
    5461  | |
    5462  | |
    5463  +-------+
    5464  | Alloc | 1st[m_1stNullItemsBeginCount]
    5465  +-------+
    5466  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    5467  +-------+
    5468  | ... |
    5469  +-------+
    5470  | Alloc | 1st[1st.size() - 1]
    5471  +-------+
    5472  | |
    5473  | |
    5474  | |
    5475  +-------+
    5476  | Alloc | 2nd[2nd.size() - 1]
    5477  +-------+
    5478  | ... |
    5479  +-------+
    5480  | Alloc | 2nd[1]
    5481  +-------+
    5482  | Alloc | 2nd[0]
    5483 GetSize() +-------+
    5484 
    5485 */
    5486 class VmaBlockMetadata_Linear : public VmaBlockMetadata
    5487 {
    5488  VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
    5489 public:
    5490  VmaBlockMetadata_Linear(VmaAllocator hAllocator);
    5491  virtual ~VmaBlockMetadata_Linear();
    5492  virtual void Init(VkDeviceSize size);
    5493 
    5494  virtual bool Validate() const;
    5495  virtual size_t GetAllocationCount() const;
    5496  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    5497  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    5498  virtual bool IsEmpty() const { return GetAllocationCount() == 0; }
    5499 
    5500  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    5501  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
    5502 
    5503 #if VMA_STATS_STRING_ENABLED
    5504  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
    5505 #endif
    5506 
    5507  virtual bool CreateAllocationRequest(
    5508  uint32_t currentFrameIndex,
    5509  uint32_t frameInUseCount,
    5510  VkDeviceSize bufferImageGranularity,
    5511  VkDeviceSize allocSize,
    5512  VkDeviceSize allocAlignment,
    5513  bool upperAddress,
    5514  VmaSuballocationType allocType,
    5515  bool canMakeOtherLost,
    5516  uint32_t strategy,
    5517  VmaAllocationRequest* pAllocationRequest);
    5518 
    5519  virtual bool MakeRequestedAllocationsLost(
    5520  uint32_t currentFrameIndex,
    5521  uint32_t frameInUseCount,
    5522  VmaAllocationRequest* pAllocationRequest);
    5523 
    5524  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
    5525 
    5526  virtual VkResult CheckCorruption(const void* pBlockData);
    5527 
    5528  virtual void Alloc(
    5529  const VmaAllocationRequest& request,
    5530  VmaSuballocationType type,
    5531  VkDeviceSize allocSize,
    5532  VmaAllocation hAllocation);
    5533 
    5534  virtual void Free(const VmaAllocation allocation);
    5535  virtual void FreeAtOffset(VkDeviceSize offset);
    5536 
    5537 private:
    5538  /*
    5539  There are two suballocation vectors, used in ping-pong way.
    5540  The one with index m_1stVectorIndex is called 1st.
    5541  The one with index (m_1stVectorIndex ^ 1) is called 2nd.
    5542  2nd can be non-empty only when 1st is not empty.
    5543  When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
    5544  */
    5545  typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;
    5546 
    5547  enum SECOND_VECTOR_MODE
    5548  {
    5549  SECOND_VECTOR_EMPTY,
    5550  /*
    5551  Suballocations in 2nd vector are created later than the ones in 1st, but they
    5552  all have smaller offsets.
    5553  */
    5554  SECOND_VECTOR_RING_BUFFER,
    5555  /*
    5556  Suballocations in 2nd vector are upper side of double stack.
    5557  They all have offsets higher than those in 1st vector.
    5558  Top of this stack means smaller offsets, but higher indices in this vector.
    5559  */
    5560  SECOND_VECTOR_DOUBLE_STACK,
    5561  };
    5562 
    5563  VkDeviceSize m_SumFreeSize;
    5564  SuballocationVectorType m_Suballocations0, m_Suballocations1;
    5565  uint32_t m_1stVectorIndex;
    5566  SECOND_VECTOR_MODE m_2ndVectorMode;
    5567 
    5568  SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    5569  SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
    5570  const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    5571  const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
    5572 
    5573  // Number of items in 1st vector with hAllocation = null at the beginning.
    5574  size_t m_1stNullItemsBeginCount;
    5575  // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
    5576  size_t m_1stNullItemsMiddleCount;
    5577  // Number of items in 2nd vector with hAllocation = null.
    5578  size_t m_2ndNullItemsCount;
    5579 
    5580  bool ShouldCompact1st() const;
    5581  void CleanupAfterFree();
    5582 
    5583  bool CreateAllocationRequest_LowerAddress(
    5584  uint32_t currentFrameIndex,
    5585  uint32_t frameInUseCount,
    5586  VkDeviceSize bufferImageGranularity,
    5587  VkDeviceSize allocSize,
    5588  VkDeviceSize allocAlignment,
    5589  VmaSuballocationType allocType,
    5590  bool canMakeOtherLost,
    5591  uint32_t strategy,
    5592  VmaAllocationRequest* pAllocationRequest);
    5593  bool CreateAllocationRequest_UpperAddress(
    5594  uint32_t currentFrameIndex,
    5595  uint32_t frameInUseCount,
    5596  VkDeviceSize bufferImageGranularity,
    5597  VkDeviceSize allocSize,
    5598  VkDeviceSize allocAlignment,
    5599  VmaSuballocationType allocType,
    5600  bool canMakeOtherLost,
    5601  uint32_t strategy,
    5602  VmaAllocationRequest* pAllocationRequest);
    5603 };
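// An illustrative sketch of the ping-pong selection used by
// AccessSuballocations1st/2nd above (not part of vk_mem_alloc.h): swapping the
// roles of the two suballocation vectors is just toggling one index bit, with no
// element copies.
#if 0
static void PingPongExample()
{
    uint32_t firstVectorIndex = 0; // 1st is m_Suballocations0, 2nd is m_Suballocations1.
    uint32_t secondVectorIndex = firstVectorIndex ^ 1; // == 1
    firstVectorIndex ^= 1; // Roles swapped: 1st is now m_Suballocations1.
    (void)secondVectorIndex;
}
#endif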
    5604 
    5605 /*
    5606 - GetSize() is the original size of allocated memory block.
    5607 - m_UsableSize is this size aligned down to a power of two.
    5608  All allocations and calculations happen relative to m_UsableSize.
    5609 - GetUnusableSize() is the difference between them.
    5610  It is reported as a separate, unused range, not available for allocations.
    5611 
    5612 Node at level 0 has size = m_UsableSize.
    5613 Each subsequent level contains nodes half the size of those on the previous level.
    5614 m_LevelCount is the maximum number of levels to use in the current object.
    5615 */
    5616 class VmaBlockMetadata_Buddy : public VmaBlockMetadata
    5617 {
    5618  VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
    5619 public:
    5620  VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
    5621  virtual ~VmaBlockMetadata_Buddy();
    5622  virtual void Init(VkDeviceSize size);
    5623 
    5624  virtual bool Validate() const;
    5625  virtual size_t GetAllocationCount() const { return m_AllocationCount; }
    5626  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
    5627  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    5628  virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
    5629 
    5630  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    5631  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
    5632 
    5633 #if VMA_STATS_STRING_ENABLED
    5634  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
    5635 #endif
    5636 
    5637  virtual bool CreateAllocationRequest(
    5638  uint32_t currentFrameIndex,
    5639  uint32_t frameInUseCount,
    5640  VkDeviceSize bufferImageGranularity,
    5641  VkDeviceSize allocSize,
    5642  VkDeviceSize allocAlignment,
    5643  bool upperAddress,
    5644  VmaSuballocationType allocType,
    5645  bool canMakeOtherLost,
    5646  uint32_t strategy,
    5647  VmaAllocationRequest* pAllocationRequest);
    5648 
    5649  virtual bool MakeRequestedAllocationsLost(
    5650  uint32_t currentFrameIndex,
    5651  uint32_t frameInUseCount,
    5652  VmaAllocationRequest* pAllocationRequest);
    5653 
    5654  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
    5655 
    5656  virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
    5657 
    5658  virtual void Alloc(
    5659  const VmaAllocationRequest& request,
    5660  VmaSuballocationType type,
    5661  VkDeviceSize allocSize,
    5662  VmaAllocation hAllocation);
    5663 
    5664  virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
    5665  virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
    5666 
    5667 private:
    5668  static const VkDeviceSize MIN_NODE_SIZE = 32;
    5669  static const size_t MAX_LEVELS = 30;
    5670 
    5671  struct ValidationContext
    5672  {
    5673  size_t calculatedAllocationCount;
    5674  size_t calculatedFreeCount;
    5675  VkDeviceSize calculatedSumFreeSize;
    5676 
    5677  ValidationContext() :
    5678  calculatedAllocationCount(0),
    5679  calculatedFreeCount(0),
    5680  calculatedSumFreeSize(0) { }
    5681  };
    5682 
    5683  struct Node
    5684  {
    5685  VkDeviceSize offset;
    5686  enum TYPE
    5687  {
    5688  TYPE_FREE,
    5689  TYPE_ALLOCATION,
    5690  TYPE_SPLIT,
    5691  TYPE_COUNT
    5692  } type;
    5693  Node* parent;
    5694  Node* buddy;
    5695 
    5696  union
    5697  {
    5698  struct
    5699  {
    5700  Node* prev;
    5701  Node* next;
    5702  } free;
    5703  struct
    5704  {
    5705  VmaAllocation alloc;
    5706  } allocation;
    5707  struct
    5708  {
    5709  Node* leftChild;
    5710  } split;
    5711  };
    5712  };
    5713 
    5714  // Size of the memory block aligned down to a power of two.
    5715  VkDeviceSize m_UsableSize;
    5716  uint32_t m_LevelCount;
    5717 
    5718  Node* m_Root;
    5719  struct {
    5720  Node* front;
    5721  Node* back;
    5722  } m_FreeList[MAX_LEVELS];
    5723  // Number of nodes in the tree with type == TYPE_ALLOCATION.
    5724  size_t m_AllocationCount;
    5725  // Number of nodes in the tree with type == TYPE_FREE.
    5726  size_t m_FreeCount;
    5727  // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
    5728  VkDeviceSize m_SumFreeSize;
    5729 
    5730  VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
    5731  void DeleteNode(Node* node);
    5732  bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
    5733  uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
    5734  inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
    5735  // Alloc passed just for validation. Can be null.
    5736  void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
    5737  void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
    5738  // Adds node to the front of FreeList at given level.
    5739  // node->type must be FREE.
    5740  // node->free.prev, next can be undefined.
    5741  void AddToFreeListFront(uint32_t level, Node* node);
    5742  // Removes node from FreeList at given level.
    5743  // node->type must be FREE.
    5744  // node->free.prev, next stay untouched.
    5745  void RemoveFromFreeList(uint32_t level, Node* node);
    5746 
    5747 #if VMA_STATS_STRING_ENABLED
    5748  void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
    5749 #endif
    5750 };
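// An illustrative worked example of the buddy metadata sizing described above
// (not part of vk_mem_alloc.h): for a 10 MiB block, m_UsableSize is aligned down
// to the power of two 8 MiB and GetUnusableSize() reports the remaining 2 MiB as
// a permanently unused range. Node sizes then follow
// LevelToNodeSize(level) = m_UsableSize >> level: level 0 -> 8 MiB,
// level 1 -> 4 MiB, level 2 -> 2 MiB, and so on, bounded by MIN_NODE_SIZE (32 B)
// and at most MAX_LEVELS (30) levels.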
    5751 
    5752 /*
    5753 Represents a single block of device memory (`VkDeviceMemory`) with all the
    5754 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
    5755 
    5756 Thread-safety: This class must be externally synchronized.
    5757 */
    5758 class VmaDeviceMemoryBlock
    5759 {
    5760  VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
    5761 public:
    5762  VmaBlockMetadata* m_pMetadata;
    5763 
    5764  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
    5765 
    5766  ~VmaDeviceMemoryBlock()
    5767  {
    5768  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
    5769  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    5770  }
    5771 
    5772  // Always call after construction.
    5773  void Init(
    5774  VmaAllocator hAllocator,
    5775  VmaPool hParentPool,
    5776  uint32_t newMemoryTypeIndex,
    5777  VkDeviceMemory newMemory,
    5778  VkDeviceSize newSize,
    5779  uint32_t id,
    5780  uint32_t algorithm);
    5781  // Always call before destruction.
    5782  void Destroy(VmaAllocator allocator);
    5783 
    5784  VmaPool GetParentPool() const { return m_hParentPool; }
    5785  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
    5786  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    5787  uint32_t GetId() const { return m_Id; }
    5788  void* GetMappedData() const { return m_pMappedData; }
    5789 
    5790  // Validates all data structures inside this object. If not valid, returns false.
    5791  bool Validate() const;
    5792 
    5793  VkResult CheckCorruption(VmaAllocator hAllocator);
    5794 
    5795  // ppData can be null.
    5796  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
    5797  void Unmap(VmaAllocator hAllocator, uint32_t count);
    5798 
    5799  VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
    5800  VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
    5801 
    5802  VkResult BindBufferMemory(
    5803  const VmaAllocator hAllocator,
    5804  const VmaAllocation hAllocation,
    5805  VkBuffer hBuffer);
    5806  VkResult BindImageMemory(
    5807  const VmaAllocator hAllocator,
    5808  const VmaAllocation hAllocation,
    5809  VkImage hImage);
    5810 
    5811 private:
    5812  VmaPool m_hParentPool; // VK_NULL_HANDLE if this block doesn't belong to a custom pool.
    5813  uint32_t m_MemoryTypeIndex;
    5814  uint32_t m_Id;
    5815  VkDeviceMemory m_hMemory;
    5816 
    5817  /*
    5818  Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
    5819  Also protects m_MapCount, m_pMappedData.
    5820  Allocations, deallocations, and any change to m_pMetadata are protected by the parent's VmaBlockVector::m_Mutex.
    5821  */
    5822  VMA_MUTEX m_Mutex;
    5823  uint32_t m_MapCount;
    5824  void* m_pMappedData;
    5825 };
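// A minimal usage sketch of the reference-counted mapping of
// VmaDeviceMemoryBlock (not part of vk_mem_alloc.h). hAllocator and block are
// assumed to be valid, initialized objects.
#if 0
static void BlockMapExample(VmaAllocator hAllocator, VmaDeviceMemoryBlock& block)
{
    void* pData = VMA_NULL;
    VkResult res = block.Map(hAllocator, 1, &pData); // count = 1; nested maps are allowed.
    if(res == VK_SUCCESS)
    {
        // ... read or write through pData ...
        block.Unmap(hAllocator, 1); // Must balance the earlier Map with the same count.
    }
}
#endif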
    5826 
    5827 struct VmaPointerLess
    5828 {
    5829  bool operator()(const void* lhs, const void* rhs) const
    5830  {
    5831  return lhs < rhs;
    5832  }
    5833 };
    5834 
    5835 struct VmaDefragmentationMove
    5836 {
    5837  size_t srcBlockIndex;
    5838  size_t dstBlockIndex;
    5839  VkDeviceSize srcOffset;
    5840  VkDeviceSize dstOffset;
    5841  VkDeviceSize size;
    5842 };
    5843 
    5844 class VmaDefragmentationAlgorithm;
    5845 
    5846 /*
    5847 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
    5848 Vulkan memory type.
    5849 
    5850 Synchronized internally with a mutex.
    5851 */
    5852 struct VmaBlockVector
    5853 {
    5854  VMA_CLASS_NO_COPY(VmaBlockVector)
    5855 public:
    5856  VmaBlockVector(
    5857  VmaAllocator hAllocator,
    5858  VmaPool hParentPool,
    5859  uint32_t memoryTypeIndex,
    5860  VkDeviceSize preferredBlockSize,
    5861  size_t minBlockCount,
    5862  size_t maxBlockCount,
    5863  VkDeviceSize bufferImageGranularity,
    5864  uint32_t frameInUseCount,
    5865  bool isCustomPool,
    5866  bool explicitBlockSize,
    5867  uint32_t algorithm);
    5868  ~VmaBlockVector();
    5869 
    5870  VkResult CreateMinBlocks();
    5871 
    5872  VmaPool GetParentPool() const { return m_hParentPool; }
    5873  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    5874  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    5875  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    5876  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
    5877  uint32_t GetAlgorithm() const { return m_Algorithm; }
    5878 
    5879  void GetPoolStats(VmaPoolStats* pStats);
    5880 
    5881  bool IsEmpty() const { return m_Blocks.empty(); }
    5882  bool IsCorruptionDetectionEnabled() const;
    5883 
    5884  VkResult Allocate(
    5885  uint32_t currentFrameIndex,
    5886  VkDeviceSize size,
    5887  VkDeviceSize alignment,
    5888  const VmaAllocationCreateInfo& createInfo,
    5889  VmaSuballocationType suballocType,
    5890  size_t allocationCount,
    5891  VmaAllocation* pAllocations);
    5892 
    5893  void Free(
    5894  VmaAllocation hAllocation);
    5895 
    5896  // Adds statistics of this BlockVector to pStats.
    5897  void AddStats(VmaStats* pStats);
    5898 
    5899 #if VMA_STATS_STRING_ENABLED
    5900  void PrintDetailedMap(class VmaJsonWriter& json);
    5901 #endif
    5902 
    5903  void MakePoolAllocationsLost(
    5904  uint32_t currentFrameIndex,
    5905  size_t* pLostAllocationCount);
    5906  VkResult CheckCorruption();
    5907 
    5908  // Saves results in pCtx->res.
    5909  void Defragment(
    5910  class VmaBlockVectorDefragmentationContext* pCtx,
    5911  VmaDefragmentationStats* pStats,
    5912  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
    5913  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
    5914  VkCommandBuffer commandBuffer);
    5915  void DefragmentationEnd(
    5916  class VmaBlockVectorDefragmentationContext* pCtx,
    5917  VmaDefragmentationStats* pStats);
    5918 
    5919  ////////////////////////////
    5920  // To be used only while the m_Mutex is locked. Used during defragmentation.
    5921 
    5922  size_t GetBlockCount() const { return m_Blocks.size(); }
    5923  VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
    5924  size_t CalcAllocationCount() const;
    5925  bool IsBufferImageGranularityConflictPossible() const;
    5926 
    5927 private:
    5928  friend class VmaDefragmentationAlgorithm_Generic;
    5929 
    5930  const VmaAllocator m_hAllocator;
    5931  const VmaPool m_hParentPool;
    5932  const uint32_t m_MemoryTypeIndex;
    5933  const VkDeviceSize m_PreferredBlockSize;
    5934  const size_t m_MinBlockCount;
    5935  const size_t m_MaxBlockCount;
    5936  const VkDeviceSize m_BufferImageGranularity;
    5937  const uint32_t m_FrameInUseCount;
    5938  const bool m_IsCustomPool;
    5939  const bool m_ExplicitBlockSize;
    5940  const uint32_t m_Algorithm;
    5941  /* There can be at most one memory block that is completely empty - a
    5942  hysteresis to avoid the pessimistic case of alternating creation and
    5943  destruction of a VkDeviceMemory. */
    5944  bool m_HasEmptyBlock;
    5945  VMA_RW_MUTEX m_Mutex;
    5946  // Incrementally sorted by sumFreeSize, ascending.
    5947  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
    5948  uint32_t m_NextBlockId;
    5949 
    5950  VkDeviceSize CalcMaxBlockSize() const;
    5951 
    5952  // Finds and removes given block from vector.
    5953  void Remove(VmaDeviceMemoryBlock* pBlock);
    5954 
    5955  // Performs a single step of sorting m_Blocks. They may not be fully sorted
    5956  // after this call.
    5957  void IncrementallySortBlocks();
    5958 
    5959  VkResult AllocatePage(
    5960  uint32_t currentFrameIndex,
    5961  VkDeviceSize size,
    5962  VkDeviceSize alignment,
    5963  const VmaAllocationCreateInfo& createInfo,
    5964  VmaSuballocationType suballocType,
    5965  VmaAllocation* pAllocation);
    5966 
    5967  // To be used only without CAN_MAKE_OTHER_LOST flag.
    5968  VkResult AllocateFromBlock(
    5969  VmaDeviceMemoryBlock* pBlock,
    5970  uint32_t currentFrameIndex,
    5971  VkDeviceSize size,
    5972  VkDeviceSize alignment,
    5973  VmaAllocationCreateFlags allocFlags,
    5974  void* pUserData,
    5975  VmaSuballocationType suballocType,
    5976  uint32_t strategy,
    5977  VmaAllocation* pAllocation);
    5978 
    5979  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
    5980 
    5981  // Saves result to pCtx->res.
    5982  void ApplyDefragmentationMovesCpu(
    5983  class VmaBlockVectorDefragmentationContext* pDefragCtx,
    5984  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves);
    5985  // Saves result to pCtx->res.
    5986  void ApplyDefragmentationMovesGpu(
    5987  class VmaBlockVectorDefragmentationContext* pDefragCtx,
    5988  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    5989  VkCommandBuffer commandBuffer);
    5990 
    5991  /*
    5992  Used during defragmentation. pDefragmentationStats is optional; it is an
    5993  in/out parameter, updated with new data.
    5994  */
    5995  void FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats);
    5996 };
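// Illustrative sketch, not part of the library: allocating several pages from a
// block vector in one call. 'blockVector', 'frameIndex', 'memReq' and
// 'allocCreateInfo' are assumptions here.
//
//   VmaAllocation allocs[3] = {};
//   VkResult res = blockVector.Allocate(
//       frameIndex,
//       memReq.size, memReq.alignment,
//       allocCreateInfo,
//       VMA_SUBALLOCATION_TYPE_BUFFER,
//       3, allocs);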
    5997 
    5998 struct VmaPool_T
    5999 {
    6000  VMA_CLASS_NO_COPY(VmaPool_T)
    6001 public:
    6002  VmaBlockVector m_BlockVector;
    6003 
    6004  VmaPool_T(
    6005  VmaAllocator hAllocator,
    6006  const VmaPoolCreateInfo& createInfo,
    6007  VkDeviceSize preferredBlockSize);
    6008  ~VmaPool_T();
    6009 
    6010  uint32_t GetId() const { return m_Id; }
    6011  void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
    6012 
    6013 #if VMA_STATS_STRING_ENABLED
    6014  //void PrintDetailedMap(class VmaStringBuilder& sb);
    6015 #endif
    6016 
    6017 private:
    6018  uint32_t m_Id;
    6019 };
    6020 
    6021 /*
    6022 Performs defragmentation:
    6023 
    6024 - Updates `pBlockVector->m_pMetadata`.
    6025 - Updates allocations by calling ChangeBlockAllocation() or ChangeOffset().
    6026 - Does not move actual data, only returns requested moves as `moves`.
    6027 */
    6028 class VmaDefragmentationAlgorithm
    6029 {
    6030  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm)
    6031 public:
    6032  VmaDefragmentationAlgorithm(
    6033  VmaAllocator hAllocator,
    6034  VmaBlockVector* pBlockVector,
    6035  uint32_t currentFrameIndex) :
    6036  m_hAllocator(hAllocator),
    6037  m_pBlockVector(pBlockVector),
    6038  m_CurrentFrameIndex(currentFrameIndex)
    6039  {
    6040  }
    6041  virtual ~VmaDefragmentationAlgorithm()
    6042  {
    6043  }
    6044 
    6045  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) = 0;
    6046  virtual void AddAll() = 0;
    6047 
    6048  virtual VkResult Defragment(
    6049  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    6050  VkDeviceSize maxBytesToMove,
    6051  uint32_t maxAllocationsToMove) = 0;
    6052 
    6053  virtual VkDeviceSize GetBytesMoved() const = 0;
    6054  virtual uint32_t GetAllocationsMoved() const = 0;
    6055 
    6056 protected:
    6057  VmaAllocator const m_hAllocator;
    6058  VmaBlockVector* const m_pBlockVector;
    6059  const uint32_t m_CurrentFrameIndex;
    6060 
    6061  struct AllocationInfo
    6062  {
    6063  VmaAllocation m_hAllocation;
    6064  VkBool32* m_pChanged;
    6065 
    6066  AllocationInfo() :
    6067  m_hAllocation(VK_NULL_HANDLE),
    6068  m_pChanged(VMA_NULL)
    6069  {
    6070  }
    6071  AllocationInfo(VmaAllocation hAlloc, VkBool32* pChanged) :
    6072  m_hAllocation(hAlloc),
    6073  m_pChanged(pChanged)
    6074  {
    6075  }
    6076  };
    6077 };
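// Illustrative sketch, hypothetical driver code: an algorithm instance only
// computes moves; it never touches the actual memory contents. 'pAlgorithm'
// and 'allocCallbacks' are assumptions.
//
//   VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >
//       moves(VmaStlAllocator<VmaDefragmentationMove>(allocCallbacks));
//   pAlgorithm->AddAll();
//   VkResult res = pAlgorithm->Defragment(moves, VK_WHOLE_SIZE, UINT32_MAX);
//   // 'moves' is then applied via ApplyDefragmentationMovesCpu/Gpu.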
    6078 
    6079 class VmaDefragmentationAlgorithm_Generic : public VmaDefragmentationAlgorithm
    6080 {
    6081  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Generic)
    6082 public:
    6083  VmaDefragmentationAlgorithm_Generic(
    6084  VmaAllocator hAllocator,
    6085  VmaBlockVector* pBlockVector,
    6086  uint32_t currentFrameIndex,
    6087  bool overlappingMoveSupported);
    6088  virtual ~VmaDefragmentationAlgorithm_Generic();
    6089 
    6090  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
    6091  virtual void AddAll() { m_AllAllocations = true; }
    6092 
    6093  virtual VkResult Defragment(
    6094  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    6095  VkDeviceSize maxBytesToMove,
    6096  uint32_t maxAllocationsToMove);
    6097 
    6098  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    6099  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
    6100 
    6101 private:
    6102  uint32_t m_AllocationCount;
    6103  bool m_AllAllocations;
    6104 
    6105  VkDeviceSize m_BytesMoved;
    6106  uint32_t m_AllocationsMoved;
    6107 
    6108  struct AllocationInfoSizeGreater
    6109  {
    6110  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
    6111  {
    6112  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
    6113  }
    6114  };
    6115 
    6116  struct AllocationInfoOffsetGreater
    6117  {
    6118  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
    6119  {
    6120  return lhs.m_hAllocation->GetOffset() > rhs.m_hAllocation->GetOffset();
    6121  }
    6122  };
    6123 
    6124  struct BlockInfo
    6125  {
    6126  size_t m_OriginalBlockIndex;
    6127  VmaDeviceMemoryBlock* m_pBlock;
    6128  bool m_HasNonMovableAllocations;
    6129  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
    6130 
    6131  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
    6132  m_OriginalBlockIndex(SIZE_MAX),
    6133  m_pBlock(VMA_NULL),
    6134  m_HasNonMovableAllocations(true),
    6135  m_Allocations(pAllocationCallbacks)
    6136  {
    6137  }
    6138 
    6139  void CalcHasNonMovableAllocations()
    6140  {
    6141  const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
    6142  const size_t defragmentAllocCount = m_Allocations.size();
    6143  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
    6144  }
    6145 
    6146  void SortAllocationsBySizeDescending()
    6147  {
    6148  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
    6149  }
    6150 
    6151  void SortAllocationsByOffsetDescending()
    6152  {
    6153  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoOffsetGreater());
    6154  }
    6155  };
    6156 
    6157  struct BlockPointerLess
    6158  {
    6159  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
    6160  {
    6161  return pLhsBlockInfo->m_pBlock < pRhsBlock;
    6162  }
    6163  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
    6164  {
    6165  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
    6166  }
    6167  };
    6168 
    6169  // 1. Blocks with some non-movable allocations go first.
    6170  // 2. Blocks with smaller sumFreeSize go first.
    6171  struct BlockInfoCompareMoveDestination
    6172  {
    6173  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
    6174  {
    6175  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
    6176  {
    6177  return true;
    6178  }
    6179  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
    6180  {
    6181  return false;
    6182  }
    6183  if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
    6184  {
    6185  return true;
    6186  }
    6187  return false;
    6188  }
    6189  };
    6190 
    6191  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
    6192  BlockInfoVector m_Blocks;
    6193 
    6194  VkResult DefragmentRound(
    6195  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    6196  VkDeviceSize maxBytesToMove,
    6197  uint32_t maxAllocationsToMove);
    6198 
    6199  size_t CalcBlocksWithNonMovableCount() const;
    6200 
    6201  static bool MoveMakesSense(
    6202  size_t dstBlockIndex, VkDeviceSize dstOffset,
    6203  size_t srcBlockIndex, VkDeviceSize srcOffset);
    6204 };
    6205 
    6206 class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm
    6207 {
    6208  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Fast)
    6209 public:
    6210  VmaDefragmentationAlgorithm_Fast(
    6211  VmaAllocator hAllocator,
    6212  VmaBlockVector* pBlockVector,
    6213  uint32_t currentFrameIndex,
    6214  bool overlappingMoveSupported);
    6215  virtual ~VmaDefragmentationAlgorithm_Fast();
    6216 
    6217  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; }
    6218  virtual void AddAll() { m_AllAllocations = true; }
    6219 
    6220  virtual VkResult Defragment(
    6221  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    6222  VkDeviceSize maxBytesToMove,
    6223  uint32_t maxAllocationsToMove);
    6224 
    6225  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    6226  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
    6227 
    6228 private:
    6229  struct BlockInfo
    6230  {
    6231  size_t origBlockIndex;
    6232  };
    6233 
    6234  class FreeSpaceDatabase
    6235  {
    6236  public:
    6237  FreeSpaceDatabase()
    6238  {
    6239  FreeSpace s = {};
    6240  s.blockInfoIndex = SIZE_MAX;
    6241  for(size_t i = 0; i < MAX_COUNT; ++i)
    6242  {
    6243  m_FreeSpaces[i] = s;
    6244  }
    6245  }
    6246 
    6247  void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size)
    6248  {
    6249  if(size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    6250  {
    6251  return;
    6252  }
    6253 
    6254  // Find the first unused slot, or else the smallest structure that is smaller than the new free space.
    6255  size_t bestIndex = SIZE_MAX;
    6256  for(size_t i = 0; i < MAX_COUNT; ++i)
    6257  {
    6258  // Empty structure.
    6259  if(m_FreeSpaces[i].blockInfoIndex == SIZE_MAX)
    6260  {
    6261  bestIndex = i;
    6262  break;
    6263  }
    6264  if(m_FreeSpaces[i].size < size &&
    6265  (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size))
    6266  {
    6267  bestIndex = i;
    6268  }
    6269  }
    6270 
    6271  if(bestIndex != SIZE_MAX)
    6272  {
    6273  m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex;
    6274  m_FreeSpaces[bestIndex].offset = offset;
    6275  m_FreeSpaces[bestIndex].size = size;
    6276  }
    6277  }
    6278 
    6279  bool Fetch(VkDeviceSize alignment, VkDeviceSize size,
    6280  size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset)
    6281  {
    6282  size_t bestIndex = SIZE_MAX;
    6283  VkDeviceSize bestFreeSpaceAfter = 0;
    6284  for(size_t i = 0; i < MAX_COUNT; ++i)
    6285  {
    6286  // Structure is valid.
    6287  if(m_FreeSpaces[i].blockInfoIndex != SIZE_MAX)
    6288  {
    6289  const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment);
    6290  // Allocation fits into this structure.
    6291  if(dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size)
    6292  {
    6293  const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) -
    6294  (dstOffset + size);
    6295  if(bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter)
    6296  {
    6297  bestIndex = i;
    6298  bestFreeSpaceAfter = freeSpaceAfter;
    6299  }
    6300  }
    6301  }
    6302  }
    6303 
    6304  if(bestIndex != SIZE_MAX)
    6305  {
    6306  outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex;
    6307  outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment);
    6308 
    6309  if(bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    6310  {
    6311  // Leave this structure for remaining empty space.
    6312  const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size;
    6313  m_FreeSpaces[bestIndex].offset += alignmentPlusSize;
    6314  m_FreeSpaces[bestIndex].size -= alignmentPlusSize;
    6315  }
    6316  else
    6317  {
    6318  // This structure becomes invalid.
    6319  m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX;
    6320  }
    6321 
    6322  return true;
    6323  }
    6324 
    6325  return false;
    6326  }
    6327 
    6328  private:
    6329  static const size_t MAX_COUNT = 4;
    6330 
    6331  struct FreeSpace
    6332  {
    6333  size_t blockInfoIndex; // SIZE_MAX means this structure is invalid.
    6334  VkDeviceSize offset;
    6335  VkDeviceSize size;
    6336  } m_FreeSpaces[MAX_COUNT];
    6337  };
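          // Illustrative example with hypothetical values: the database keeps up to
          // MAX_COUNT free regions. Register() records a region (if large enough),
          // Fetch() hands out a best-fitting one and shrinks or invalidates it.
          //
          //   FreeSpaceDatabase db;
          //   db.Register(0, 256, 1024); // block 0, offset 256, size 1024
          //   size_t blockIndex = 0; VkDeviceSize dstOffset = 0;
          //   if(db.Fetch(16, 512, blockIndex, dstOffset))
          //   {
          //       // blockIndex == 0, dstOffset == 256; the remaining 512 bytes at
          //       // offset 768 stay registered for a later Fetch().
          //   }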
    6338 
    6339  const bool m_OverlappingMoveSupported;
    6340 
    6341  uint32_t m_AllocationCount;
    6342  bool m_AllAllocations;
    6343 
    6344  VkDeviceSize m_BytesMoved;
    6345  uint32_t m_AllocationsMoved;
    6346 
    6347  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> > m_BlockInfos;
    6348 
    6349  void PreprocessMetadata();
    6350  void PostprocessMetadata();
    6351  void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc);
    6352 };
    6353 
    6354 struct VmaBlockDefragmentationContext
    6355 {
    6356  enum BLOCK_FLAG
    6357  {
    6358  BLOCK_FLAG_USED = 0x00000001,
    6359  };
    6360  uint32_t flags;
    6361  VkBuffer hBuffer;
    6362 
    6363  VmaBlockDefragmentationContext() :
    6364  flags(0),
    6365  hBuffer(VK_NULL_HANDLE)
    6366  {
    6367  }
    6368 };
    6369 
    6370 class VmaBlockVectorDefragmentationContext
    6371 {
    6372  VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext)
    6373 public:
    6374  VkResult res;
    6375  bool mutexLocked;
    6376  VmaVector< VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext> > blockContexts;
    6377 
    6378  VmaBlockVectorDefragmentationContext(
    6379  VmaAllocator hAllocator,
    6380  VmaPool hCustomPool, // Optional.
    6381  VmaBlockVector* pBlockVector,
    6382  uint32_t currFrameIndex,
    6383  uint32_t flags);
    6384  ~VmaBlockVectorDefragmentationContext();
    6385 
    6386  VmaPool GetCustomPool() const { return m_hCustomPool; }
    6387  VmaBlockVector* GetBlockVector() const { return m_pBlockVector; }
    6388  VmaDefragmentationAlgorithm* GetAlgorithm() const { return m_pAlgorithm; }
    6389 
    6390  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
    6391  void AddAll() { m_AllAllocations = true; }
    6392 
    6393  void Begin(bool overlappingMoveSupported);
    6394 
    6395 private:
    6396  const VmaAllocator m_hAllocator;
    6397  // Null if not from a custom pool.
    6398  const VmaPool m_hCustomPool;
    6399  // Redundant, kept for convenience so it doesn't have to be fetched from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors.
    6400  VmaBlockVector* const m_pBlockVector;
    6401  const uint32_t m_CurrFrameIndex;
    6402  const uint32_t m_AlgorithmFlags;
    6403  // Owner of this object.
    6404  VmaDefragmentationAlgorithm* m_pAlgorithm;
    6405 
    6406  struct AllocInfo
    6407  {
    6408  VmaAllocation hAlloc;
    6409  VkBool32* pChanged;
    6410  };
    6411  // Used between constructor and Begin.
    6412  VmaVector< AllocInfo, VmaStlAllocator<AllocInfo> > m_Allocations;
    6413  bool m_AllAllocations;
    6414 };
    6415 
    6416 struct VmaDefragmentationContext_T
    6417 {
    6418 private:
    6419  VMA_CLASS_NO_COPY(VmaDefragmentationContext_T)
    6420 public:
    6421  VmaDefragmentationContext_T(
    6422  VmaAllocator hAllocator,
    6423  uint32_t currFrameIndex,
    6424  uint32_t flags,
    6425  VmaDefragmentationStats* pStats);
    6426  ~VmaDefragmentationContext_T();
    6427 
    6428  void AddPools(uint32_t poolCount, VmaPool* pPools);
    6429  void AddAllocations(
    6430  uint32_t allocationCount,
    6431  VmaAllocation* pAllocations,
    6432  VkBool32* pAllocationsChanged);
    6433 
    6434  /*
    6435  Returns:
    6436  - `VK_SUCCESS` if succeeded and object can be destroyed immediately.
    6437  - `VK_NOT_READY` if succeeded but the object must remain alive until vmaDefragmentationEnd().
    6438  - Negative value if an error occurred and the object can be destroyed immediately.
    6439  */
    6440  VkResult Defragment(
    6441  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
    6442  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
    6443  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats);
    6444 
    6445 private:
    6446  const VmaAllocator m_hAllocator;
    6447  const uint32_t m_CurrFrameIndex;
    6448  const uint32_t m_Flags;
    6449  VmaDefragmentationStats* const m_pStats;
    6450  // Owner of these objects.
    6451  VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES];
    6452  // Owner of these objects.
    6453  VmaVector< VmaBlockVectorDefragmentationContext*, VmaStlAllocator<VmaBlockVectorDefragmentationContext*> > m_CustomPoolContexts;
    6454 };
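// Illustrative handling of the tri-state result described above ('ctx' and the
// arguments are assumptions):
//
//   VkResult res = ctx->Defragment(maxCpuBytes, maxCpuAllocs,
//       maxGpuBytes, maxGpuAllocs, commandBuffer, pStats);
//   if(res == VK_NOT_READY)
//   {
//       // Moves were recorded into commandBuffer; keep ctx alive until
//       // vmaDefragmentationEnd() is called after the GPU work completes.
//   }
//   // On VK_SUCCESS or a negative result, ctx can be destroyed immediately.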
    6455 
    6456 #if VMA_RECORDING_ENABLED
    6457 
    6458 class VmaRecorder
    6459 {
    6460 public:
    6461  VmaRecorder();
    6462  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
    6463  void WriteConfiguration(
    6464  const VkPhysicalDeviceProperties& devProps,
    6465  const VkPhysicalDeviceMemoryProperties& memProps,
    6466  bool dedicatedAllocationExtensionEnabled);
    6467  ~VmaRecorder();
    6468 
    6469  void RecordCreateAllocator(uint32_t frameIndex);
    6470  void RecordDestroyAllocator(uint32_t frameIndex);
    6471  void RecordCreatePool(uint32_t frameIndex,
    6472  const VmaPoolCreateInfo& createInfo,
    6473  VmaPool pool);
    6474  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
    6475  void RecordAllocateMemory(uint32_t frameIndex,
    6476  const VkMemoryRequirements& vkMemReq,
    6477  const VmaAllocationCreateInfo& createInfo,
    6478  VmaAllocation allocation);
    6479  void RecordAllocateMemoryPages(uint32_t frameIndex,
    6480  const VkMemoryRequirements& vkMemReq,
    6481  const VmaAllocationCreateInfo& createInfo,
    6482  uint64_t allocationCount,
    6483  const VmaAllocation* pAllocations);
    6484  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
    6485  const VkMemoryRequirements& vkMemReq,
    6486  bool requiresDedicatedAllocation,
    6487  bool prefersDedicatedAllocation,
    6488  const VmaAllocationCreateInfo& createInfo,
    6489  VmaAllocation allocation);
    6490  void RecordAllocateMemoryForImage(uint32_t frameIndex,
    6491  const VkMemoryRequirements& vkMemReq,
    6492  bool requiresDedicatedAllocation,
    6493  bool prefersDedicatedAllocation,
    6494  const VmaAllocationCreateInfo& createInfo,
    6495  VmaAllocation allocation);
    6496  void RecordFreeMemory(uint32_t frameIndex,
    6497  VmaAllocation allocation);
    6498  void RecordFreeMemoryPages(uint32_t frameIndex,
    6499  uint64_t allocationCount,
    6500  const VmaAllocation* pAllocations);
    6501  void RecordResizeAllocation(
    6502  uint32_t frameIndex,
    6503  VmaAllocation allocation,
    6504  VkDeviceSize newSize);
    6505  void RecordSetAllocationUserData(uint32_t frameIndex,
    6506  VmaAllocation allocation,
    6507  const void* pUserData);
    6508  void RecordCreateLostAllocation(uint32_t frameIndex,
    6509  VmaAllocation allocation);
    6510  void RecordMapMemory(uint32_t frameIndex,
    6511  VmaAllocation allocation);
    6512  void RecordUnmapMemory(uint32_t frameIndex,
    6513  VmaAllocation allocation);
    6514  void RecordFlushAllocation(uint32_t frameIndex,
    6515  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    6516  void RecordInvalidateAllocation(uint32_t frameIndex,
    6517  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    6518  void RecordCreateBuffer(uint32_t frameIndex,
    6519  const VkBufferCreateInfo& bufCreateInfo,
    6520  const VmaAllocationCreateInfo& allocCreateInfo,
    6521  VmaAllocation allocation);
    6522  void RecordCreateImage(uint32_t frameIndex,
    6523  const VkImageCreateInfo& imageCreateInfo,
    6524  const VmaAllocationCreateInfo& allocCreateInfo,
    6525  VmaAllocation allocation);
    6526  void RecordDestroyBuffer(uint32_t frameIndex,
    6527  VmaAllocation allocation);
    6528  void RecordDestroyImage(uint32_t frameIndex,
    6529  VmaAllocation allocation);
    6530  void RecordTouchAllocation(uint32_t frameIndex,
    6531  VmaAllocation allocation);
    6532  void RecordGetAllocationInfo(uint32_t frameIndex,
    6533  VmaAllocation allocation);
    6534  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
    6535  VmaPool pool);
    6536  void RecordDefragmentationBegin(uint32_t frameIndex,
    6537  const VmaDefragmentationInfo2& info,
    6538  VmaDefragmentationContext ctx);
    6539  void RecordDefragmentationEnd(uint32_t frameIndex,
    6540  VmaDefragmentationContext ctx);
    6541 
    6542 private:
    6543  struct CallParams
    6544  {
    6545  uint32_t threadId;
    6546  double time;
    6547  };
    6548 
    6549  class UserDataString
    6550  {
    6551  public:
    6552  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
    6553  const char* GetString() const { return m_Str; }
    6554 
    6555  private:
    6556  char m_PtrStr[17];
    6557  const char* m_Str;
    6558  };
    6559 
    6560  bool m_UseMutex;
    6561  VmaRecordFlags m_Flags;
    6562  FILE* m_File;
    6563  VMA_MUTEX m_FileMutex;
    6564  int64_t m_Freq;
    6565  int64_t m_StartCounter;
    6566 
    6567  void GetBasicParams(CallParams& outParams);
    6568 
    6569  // T must be a pointer type, e.g. VmaAllocation, VmaPool.
    6570  template<typename T>
    6571  void PrintPointerList(uint64_t count, const T* pItems)
    6572  {
    6573  if(count)
    6574  {
    6575  fprintf(m_File, "%p", pItems[0]);
    6576  for(uint64_t i = 1; i < count; ++i)
    6577  {
    6578  fprintf(m_File, " %p", pItems[i]);
    6579  }
    6580  }
    6581  }
    6582 
    6583  void PrintPointerList(uint64_t count, const VmaAllocation* pItems);
    6584  void Flush();
    6585 };
    6586 
    6587 #endif // #if VMA_RECORDING_ENABLED
    6588 
    6589 /*
    6590 Thread-safe wrapper over VmaPoolAllocator free list, for allocation of VmaAllocation_T objects.
    6591 */
    6592 class VmaAllocationObjectAllocator
    6593 {
    6594  VMA_CLASS_NO_COPY(VmaAllocationObjectAllocator)
    6595 public:
    6596  VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks);
    6597 
    6598  VmaAllocation Allocate();
    6599  void Free(VmaAllocation hAlloc);
    6600 
    6601 private:
    6602  VMA_MUTEX m_Mutex;
    6603  VmaPoolAllocator<VmaAllocation_T> m_Allocator;
    6604 };
    6605 
    6606 // Main allocator object.
    6607 struct VmaAllocator_T
    6608 {
    6609  VMA_CLASS_NO_COPY(VmaAllocator_T)
    6610 public:
    6611  bool m_UseMutex;
    6612  bool m_UseKhrDedicatedAllocation;
    6613  VkDevice m_hDevice;
    6614  bool m_AllocationCallbacksSpecified;
    6615  VkAllocationCallbacks m_AllocationCallbacks;
    6616  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
    6617  VmaAllocationObjectAllocator m_AllocationObjectAllocator;
    6618 
    6619  // Number of bytes still free out of the heap size limit, or VK_WHOLE_SIZE if that heap has no limit.
    6620  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
    6621  VMA_MUTEX m_HeapSizeLimitMutex;
    6622 
    6623  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
    6624  VkPhysicalDeviceMemoryProperties m_MemProps;
    6625 
    6626  // Default pools.
    6627  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
    6628 
    6629  // Each vector is sorted by memory (handle value).
    6630  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
    6631  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
    6632  VMA_RW_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
    6633 
    6634  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
    6635  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
    6636  ~VmaAllocator_T();
    6637 
    6638  const VkAllocationCallbacks* GetAllocationCallbacks() const
    6639  {
    6640  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
    6641  }
    6642  const VmaVulkanFunctions& GetVulkanFunctions() const
    6643  {
    6644  return m_VulkanFunctions;
    6645  }
    6646 
    6647  VkDeviceSize GetBufferImageGranularity() const
    6648  {
    6649  return VMA_MAX(
    6650  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
    6651  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
    6652  }
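          // Example: with VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY at its default of 1,
          // this simply returns the device's bufferImageGranularity limit.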
    6653 
    6654  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
    6655  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
    6656 
    6657  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
    6658  {
    6659  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
    6660  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
    6661  }
    6662  // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
    6663  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
    6664  {
    6665  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
    6666  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    6667  }
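          // Worked example: a HOST_VISIBLE | HOST_CACHED type (no HOST_COHERENT) masks
          // down to HOST_VISIBLE_BIT alone, so this returns true; a
          // HOST_VISIBLE | HOST_COHERENT type masks to both bits and returns false.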
    6668  // Minimum alignment for all allocations in a specific memory type.
    6669  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
    6670  {
    6671  return IsMemoryTypeNonCoherent(memTypeIndex) ?
    6672  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
    6673  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
    6674  }
    6675 
    6676  bool IsIntegratedGpu() const
    6677  {
    6678  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
    6679  }
    6680 
    6681 #if VMA_RECORDING_ENABLED
    6682  VmaRecorder* GetRecorder() const { return m_pRecorder; }
    6683 #endif
    6684 
    6685  void GetBufferMemoryRequirements(
    6686  VkBuffer hBuffer,
    6687  VkMemoryRequirements& memReq,
    6688  bool& requiresDedicatedAllocation,
    6689  bool& prefersDedicatedAllocation) const;
    6690  void GetImageMemoryRequirements(
    6691  VkImage hImage,
    6692  VkMemoryRequirements& memReq,
    6693  bool& requiresDedicatedAllocation,
    6694  bool& prefersDedicatedAllocation) const;
    6695 
    6696  // Main allocation function.
    6697  VkResult AllocateMemory(
    6698  const VkMemoryRequirements& vkMemReq,
    6699  bool requiresDedicatedAllocation,
    6700  bool prefersDedicatedAllocation,
    6701  VkBuffer dedicatedBuffer,
    6702  VkImage dedicatedImage,
    6703  const VmaAllocationCreateInfo& createInfo,
    6704  VmaSuballocationType suballocType,
    6705  size_t allocationCount,
    6706  VmaAllocation* pAllocations);
    6707 
    6708  // Main deallocation function.
    6709  void FreeMemory(
    6710  size_t allocationCount,
    6711  const VmaAllocation* pAllocations);
    6712 
    6713  VkResult ResizeAllocation(
    6714  const VmaAllocation alloc,
    6715  VkDeviceSize newSize);
    6716 
    6717  void CalculateStats(VmaStats* pStats);
    6718 
    6719 #if VMA_STATS_STRING_ENABLED
    6720  void PrintDetailedMap(class VmaJsonWriter& json);
    6721 #endif
    6722 
    6723  VkResult DefragmentationBegin(
    6724  const VmaDefragmentationInfo2& info,
    6725  VmaDefragmentationStats* pStats,
    6726  VmaDefragmentationContext* pContext);
    6727  VkResult DefragmentationEnd(
    6728  VmaDefragmentationContext context);
    6729 
    6730  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
    6731  bool TouchAllocation(VmaAllocation hAllocation);
    6732 
    6733  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
    6734  void DestroyPool(VmaPool pool);
    6735  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
    6736 
    6737  void SetCurrentFrameIndex(uint32_t frameIndex);
    6738  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
    6739 
    6740  void MakePoolAllocationsLost(
    6741  VmaPool hPool,
    6742  size_t* pLostAllocationCount);
    6743  VkResult CheckPoolCorruption(VmaPool hPool);
    6744  VkResult CheckCorruption(uint32_t memoryTypeBits);
    6745 
    6746  void CreateLostAllocation(VmaAllocation* pAllocation);
    6747 
    6748  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
    6749  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
    6750 
    6751  VkResult Map(VmaAllocation hAllocation, void** ppData);
    6752  void Unmap(VmaAllocation hAllocation);
    6753 
    6754  VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
    6755  VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);
    6756 
    6757  void FlushOrInvalidateAllocation(
    6758  VmaAllocation hAllocation,
    6759  VkDeviceSize offset, VkDeviceSize size,
    6760  VMA_CACHE_OPERATION op);
    6761 
    6762  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
    6763 
    6764 private:
    6765  VkDeviceSize m_PreferredLargeHeapBlockSize;
    6766 
    6767  VkPhysicalDevice m_PhysicalDevice;
    6768  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
    6769 
    6770  VMA_RW_MUTEX m_PoolsMutex;
    6771  // Protected by m_PoolsMutex. Sorted by pointer value.
    6772  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
    6773  uint32_t m_NextPoolId;
    6774 
    6775  VmaVulkanFunctions m_VulkanFunctions;
    6776 
    6777 #if VMA_RECORDING_ENABLED
    6778  VmaRecorder* m_pRecorder;
    6779 #endif
    6780 
    6781  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
    6782 
    6783  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
    6784 
    6785  VkResult AllocateMemoryOfType(
    6786  VkDeviceSize size,
    6787  VkDeviceSize alignment,
    6788  bool dedicatedAllocation,
    6789  VkBuffer dedicatedBuffer,
    6790  VkImage dedicatedImage,
    6791  const VmaAllocationCreateInfo& createInfo,
    6792  uint32_t memTypeIndex,
    6793  VmaSuballocationType suballocType,
    6794  size_t allocationCount,
    6795  VmaAllocation* pAllocations);
    6796 
    6797  // Helper function to be used only inside AllocateDedicatedMemory.
    6798  VkResult AllocateDedicatedMemoryPage(
    6799  VkDeviceSize size,
    6800  VmaSuballocationType suballocType,
    6801  uint32_t memTypeIndex,
    6802  const VkMemoryAllocateInfo& allocInfo,
    6803  bool map,
    6804  bool isUserDataString,
    6805  void* pUserData,
    6806  VmaAllocation* pAllocation);
    6807 
    6808  // Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
    6809  VkResult AllocateDedicatedMemory(
    6810  VkDeviceSize size,
    6811  VmaSuballocationType suballocType,
    6812  uint32_t memTypeIndex,
    6813  bool map,
    6814  bool isUserDataString,
    6815  void* pUserData,
    6816  VkBuffer dedicatedBuffer,
    6817  VkImage dedicatedImage,
    6818  size_t allocationCount,
    6819  VmaAllocation* pAllocations);
    6820 
    6821  // Frees and unregisters an allocation that was created as dedicated memory.
    6822  void FreeDedicatedMemory(VmaAllocation allocation);
    6823 };
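// Illustrative sketch, not the library's own code: the internal path behind a
// vmaAllocateMemoryForBuffer-style call. 'allocator', 'buf' and
// 'allocCreateInfo' are assumptions.
//
//   VkMemoryRequirements memReq = {};
//   bool requiresDedicated = false, prefersDedicated = false;
//   allocator->GetBufferMemoryRequirements(buf, memReq,
//       requiresDedicated, prefersDedicated);
//   VmaAllocation alloc = VK_NULL_HANDLE;
//   VkResult res = allocator->AllocateMemory(memReq,
//       requiresDedicated, prefersDedicated,
//       buf,            // dedicatedBuffer
//       VK_NULL_HANDLE, // dedicatedImage
//       allocCreateInfo, VMA_SUBALLOCATION_TYPE_BUFFER, 1, &alloc);
//   if(res == VK_SUCCESS)
//       res = allocator->BindBufferMemory(alloc, buf);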
    6824 
    6825 ////////////////////////////////////////////////////////////////////////////////
    6826 // Memory allocation #2 after VmaAllocator_T definition
    6827 
    6828 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
    6829 {
    6830  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
    6831 }
    6832 
    6833 static void VmaFree(VmaAllocator hAllocator, void* ptr)
    6834 {
    6835  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
    6836 }
    6837 
    6838 template<typename T>
    6839 static T* VmaAllocate(VmaAllocator hAllocator)
    6840 {
    6841  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
    6842 }
    6843 
    6844 template<typename T>
    6845 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
    6846 {
    6847  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
    6848 }
    6849 
    6850 template<typename T>
    6851 static void vma_delete(VmaAllocator hAllocator, T* ptr)
    6852 {
    6853  if(ptr != VMA_NULL)
    6854  {
    6855  ptr->~T();
    6856  VmaFree(hAllocator, ptr);
    6857  }
    6858 }
    6859 
    6860 template<typename T>
    6861 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
    6862 {
    6863  if(ptr != VMA_NULL)
    6864  {
    6865  for(size_t i = count; i--; )
    6866  ptr[i].~T();
    6867  VmaFree(hAllocator, ptr);
    6868  }
    6869 }
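// Illustrative usage of the helpers above (hypothetical type Foo):
// VmaAllocate/VmaAllocateArray only acquire raw memory, so objects must be
// constructed with placement new before vma_delete_array destroys and frees them.
//
//   Foo* arr = VmaAllocateArray<Foo>(hAllocator, 8);
//   for(size_t i = 0; i < 8; ++i) new(arr + i) Foo();
//   vma_delete_array(hAllocator, arr, 8); // Destroys in reverse order, then frees.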
    6870 
    6871 ////////////////////////////////////////////////////////////////////////////////
    6872 // VmaStringBuilder
    6873 
    6874 #if VMA_STATS_STRING_ENABLED
    6875 
    6876 class VmaStringBuilder
    6877 {
    6878 public:
    6879  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    6880  size_t GetLength() const { return m_Data.size(); }
    6881  const char* GetData() const { return m_Data.data(); }
    6882 
    6883  void Add(char ch) { m_Data.push_back(ch); }
    6884  void Add(const char* pStr);
    6885  void AddNewLine() { Add('\n'); }
    6886  void AddNumber(uint32_t num);
    6887  void AddNumber(uint64_t num);
    6888  void AddPointer(const void* ptr);
    6889 
    6890 private:
    6891  VmaVector< char, VmaStlAllocator<char> > m_Data;
    6892 };
    6893 
    6894 void VmaStringBuilder::Add(const char* pStr)
    6895 {
    6896  const size_t strLen = strlen(pStr);
    6897  if(strLen > 0)
    6898  {
    6899  const size_t oldCount = m_Data.size();
    6900  m_Data.resize(oldCount + strLen);
    6901  memcpy(m_Data.data() + oldCount, pStr, strLen);
    6902  }
    6903 }
    6904 
    6905 void VmaStringBuilder::AddNumber(uint32_t num)
    6906 {
    6907  char buf[11]; // Max uint32_t has 10 decimal digits, plus terminating null.
    6908  VmaUint32ToStr(buf, sizeof(buf), num);
    6909  Add(buf);
    6910 }
    6911 
    6912 void VmaStringBuilder::AddNumber(uint64_t num)
    6913 {
    6914  char buf[21]; // Max uint64_t has 20 decimal digits, plus terminating null.
    6915  VmaUint64ToStr(buf, sizeof(buf), num);
    6916  Add(buf);
    6917 }
    6918 
    6919 void VmaStringBuilder::AddPointer(const void* ptr)
    6920 {
    6921  char buf[21];
    6922  VmaPtrToStr(buf, sizeof(buf), ptr);
    6923  Add(buf);
    6924 }
    6925 
    6926 #endif // #if VMA_STATS_STRING_ENABLED
    6927 
    6928 ////////////////////////////////////////////////////////////////////////////////
    6929 // VmaJsonWriter
    6930 
    6931 #if VMA_STATS_STRING_ENABLED
    6932 
    6933 class VmaJsonWriter
    6934 {
    6935  VMA_CLASS_NO_COPY(VmaJsonWriter)
    6936 public:
    6937  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    6938  ~VmaJsonWriter();
    6939 
    6940  void BeginObject(bool singleLine = false);
    6941  void EndObject();
    6942 
    6943  void BeginArray(bool singleLine = false);
    6944  void EndArray();
    6945 
    6946  void WriteString(const char* pStr);
    6947  void BeginString(const char* pStr = VMA_NULL);
    6948  void ContinueString(const char* pStr);
    6949  void ContinueString(uint32_t n);
    6950  void ContinueString(uint64_t n);
    6951  void ContinueString_Pointer(const void* ptr);
    6952  void EndString(const char* pStr = VMA_NULL);
    6953 
    6954  void WriteNumber(uint32_t n);
    6955  void WriteNumber(uint64_t n);
    6956  void WriteBool(bool b);
    6957  void WriteNull();
    6958 
    6959 private:
    6960  static const char* const INDENT;
    6961 
    6962  enum COLLECTION_TYPE
    6963  {
    6964  COLLECTION_TYPE_OBJECT,
    6965  COLLECTION_TYPE_ARRAY,
    6966  };
    6967  struct StackItem
    6968  {
    6969  COLLECTION_TYPE type;
    6970  uint32_t valueCount;
    6971  bool singleLineMode;
    6972  };
    6973 
    6974  VmaStringBuilder& m_SB;
    6975  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    6976  bool m_InsideString;
    6977 
    6978  void BeginValue(bool isString);
    6979  void WriteIndent(bool oneLess = false);
    6980 };
    6981 
    6982 const char* const VmaJsonWriter::INDENT = " ";
    6983 
    6984 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
    6985  m_SB(sb),
    6986  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    6987  m_InsideString(false)
    6988 {
    6989 }
    6990 
    6991 VmaJsonWriter::~VmaJsonWriter()
    6992 {
    6993  VMA_ASSERT(!m_InsideString);
    6994  VMA_ASSERT(m_Stack.empty());
    6995 }
    6996 
    6997 void VmaJsonWriter::BeginObject(bool singleLine)
    6998 {
    6999  VMA_ASSERT(!m_InsideString);
    7000 
    7001  BeginValue(false);
    7002  m_SB.Add('{');
    7003 
    7004  StackItem item;
    7005  item.type = COLLECTION_TYPE_OBJECT;
    7006  item.valueCount = 0;
    7007  item.singleLineMode = singleLine;
    7008  m_Stack.push_back(item);
    7009 }
    7010 
    7011 void VmaJsonWriter::EndObject()
    7012 {
    7013  VMA_ASSERT(!m_InsideString);
    7014 
    7015  WriteIndent(true);
    7016  m_SB.Add('}');
    7017 
    7018  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
    7019  m_Stack.pop_back();
    7020 }
    7021 
    7022 void VmaJsonWriter::BeginArray(bool singleLine)
    7023 {
    7024  VMA_ASSERT(!m_InsideString);
    7025 
    7026  BeginValue(false);
    7027  m_SB.Add('[');
    7028 
    7029  StackItem item;
    7030  item.type = COLLECTION_TYPE_ARRAY;
    7031  item.valueCount = 0;
    7032  item.singleLineMode = singleLine;
    7033  m_Stack.push_back(item);
    7034 }
    7035 
    7036 void VmaJsonWriter::EndArray()
    7037 {
    7038  VMA_ASSERT(!m_InsideString);
    7039 
    7040  WriteIndent(true);
    7041  m_SB.Add(']');
    7042 
    7043  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
    7044  m_Stack.pop_back();
    7045 }
    7046 
    7047 void VmaJsonWriter::WriteString(const char* pStr)
    7048 {
    7049  BeginString(pStr);
    7050  EndString();
    7051 }
    7052 
    7053 void VmaJsonWriter::BeginString(const char* pStr)
    7054 {
    7055  VMA_ASSERT(!m_InsideString);
    7056 
    7057  BeginValue(true);
    7058  m_SB.Add('"');
    7059  m_InsideString = true;
    7060  if(pStr != VMA_NULL && pStr[0] != '\0')
    7061  {
    7062  ContinueString(pStr);
    7063  }
    7064 }
    7065 
    7066 void VmaJsonWriter::ContinueString(const char* pStr)
    7067 {
    7068  VMA_ASSERT(m_InsideString);
    7069 
    7070  const size_t strLen = strlen(pStr);
    7071  for(size_t i = 0; i < strLen; ++i)
    7072  {
    7073  char ch = pStr[i];
    7074  if(ch == '\\')
    7075  {
    7076  m_SB.Add("\\\\");
    7077  }
    7078  else if(ch == '"')
    7079  {
    7080  m_SB.Add("\\\"");
    7081  }
    7082  else if(ch >= 32)
    7083  {
    7084  m_SB.Add(ch);
    7085  }
    7086  else switch(ch)
    7087  {
    7088  case '\b':
    7089  m_SB.Add("\\b");
    7090  break;
    7091  case '\f':
    7092  m_SB.Add("\\f");
    7093  break;
    7094  case '\n':
    7095  m_SB.Add("\\n");
    7096  break;
    7097  case '\r':
    7098  m_SB.Add("\\r");
    7099  break;
    7100  case '\t':
    7101  m_SB.Add("\\t");
    7102  break;
    7103  default:
    7104  VMA_ASSERT(0 && "Character not currently supported.");
    7105  break;
    7106  }
    7107  }
    7108 }
    7109 
    7110 void VmaJsonWriter::ContinueString(uint32_t n)
    7111 {
    7112  VMA_ASSERT(m_InsideString);
    7113  m_SB.AddNumber(n);
    7114 }
    7115 
    7116 void VmaJsonWriter::ContinueString(uint64_t n)
    7117 {
    7118  VMA_ASSERT(m_InsideString);
    7119  m_SB.AddNumber(n);
    7120 }
    7121 
    7122 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
    7123 {
    7124  VMA_ASSERT(m_InsideString);
    7125  m_SB.AddPointer(ptr);
    7126 }
    7127 
    7128 void VmaJsonWriter::EndString(const char* pStr)
    7129 {
    7130  VMA_ASSERT(m_InsideString);
    7131  if(pStr != VMA_NULL && pStr[0] != '\0')
    7132  {
    7133  ContinueString(pStr);
    7134  }
    7135  m_SB.Add('"');
    7136  m_InsideString = false;
    7137 }
    7138 
    7139 void VmaJsonWriter::WriteNumber(uint32_t n)
    7140 {
    7141  VMA_ASSERT(!m_InsideString);
    7142  BeginValue(false);
    7143  m_SB.AddNumber(n);
    7144 }
    7145 
    7146 void VmaJsonWriter::WriteNumber(uint64_t n)
    7147 {
    7148  VMA_ASSERT(!m_InsideString);
    7149  BeginValue(false);
    7150  m_SB.AddNumber(n);
    7151 }
    7152 
    7153 void VmaJsonWriter::WriteBool(bool b)
    7154 {
    7155  VMA_ASSERT(!m_InsideString);
    7156  BeginValue(false);
    7157  m_SB.Add(b ? "true" : "false");
    7158 }
    7159 
    7160 void VmaJsonWriter::WriteNull()
    7161 {
    7162  VMA_ASSERT(!m_InsideString);
    7163  BeginValue(false);
    7164  m_SB.Add("null");
    7165 }
    7166 
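// Inside an object, items alternate key, value, key, value...; an even
// valueCount below means the next item is a key (hence must be a string), an
// odd one means it is a value and gets ": " prepended.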
    7167 void VmaJsonWriter::BeginValue(bool isString)
    7168 {
    7169  if(!m_Stack.empty())
    7170  {
    7171  StackItem& currItem = m_Stack.back();
    7172  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    7173  currItem.valueCount % 2 == 0)
    7174  {
    7175  VMA_ASSERT(isString);
    7176  }
    7177 
    7178  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    7179  currItem.valueCount % 2 != 0)
    7180  {
    7181  m_SB.Add(": ");
    7182  }
    7183  else if(currItem.valueCount > 0)
    7184  {
    7185  m_SB.Add(", ");
    7186  WriteIndent();
    7187  }
    7188  else
    7189  {
    7190  WriteIndent();
    7191  }
    7192  ++currItem.valueCount;
    7193  }
    7194 }
    7195 
    7196 void VmaJsonWriter::WriteIndent(bool oneLess)
    7197 {
    7198  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
    7199  {
    7200  m_SB.AddNewLine();
    7201 
    7202  size_t count = m_Stack.size();
    7203  if(count > 0 && oneLess)
    7204  {
    7205  --count;
    7206  }
    7207  for(size_t i = 0; i < count; ++i)
    7208  {
    7209  m_SB.Add(INDENT);
    7210  }
    7211  }
    7212 }
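// Illustrative sketch ('allocator' is an assumption): producing
// {"Name": "Pool", "Size": 1024} with the writer above.
//
//   VmaStringBuilder sb(allocator);
//   {
//       VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
//       json.BeginObject();
//       json.WriteString("Name"); json.WriteString("Pool");
//       json.WriteString("Size"); json.WriteNumber(1024u);
//       json.EndObject();
//   }
//   // sb.GetData() now holds the JSON text (note: not null-terminated).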
    7213 
    7214 #endif // #if VMA_STATS_STRING_ENABLED
    7215 
    7216 ////////////////////////////////////////////////////////////////////////////////
    7217 
    7218 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
    7219 {
    7220  if(IsUserDataString())
    7221  {
    7222  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
    7223 
    7224  FreeUserDataString(hAllocator);
    7225 
    7226  if(pUserData != VMA_NULL)
    7227  {
    7228  const char* const newStrSrc = (char*)pUserData;
    7229  const size_t newStrLen = strlen(newStrSrc);
    7230  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
    7231  memcpy(newStrDst, newStrSrc, newStrLen + 1);
    7232  m_pUserData = newStrDst;
    7233  }
    7234  }
    7235  else
    7236  {
    7237  m_pUserData = pUserData;
    7238  }
    7239 }
    7240 
    7241 void VmaAllocation_T::ChangeBlockAllocation(
    7242  VmaAllocator hAllocator,
    7243  VmaDeviceMemoryBlock* block,
    7244  VkDeviceSize offset)
    7245 {
    7246  VMA_ASSERT(block != VMA_NULL);
    7247  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    7248 
    7249  // Move mapping reference counter from old block to new block.
    7250  if(block != m_BlockAllocation.m_Block)
    7251  {
    7252  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
    7253  if(IsPersistentMap())
    7254  ++mapRefCount;
    7255  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
    7256  block->Map(hAllocator, mapRefCount, VMA_NULL);
    7257  }
    7258 
    7259  m_BlockAllocation.m_Block = block;
    7260  m_BlockAllocation.m_Offset = offset;
    7261 }
    7262 
    7263 void VmaAllocation_T::ChangeSize(VkDeviceSize newSize)
    7264 {
    7265  VMA_ASSERT(newSize > 0);
    7266  m_Size = newSize;
    7267 }
    7268 
    7269 void VmaAllocation_T::ChangeOffset(VkDeviceSize newOffset)
    7270 {
    7271  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    7272  m_BlockAllocation.m_Offset = newOffset;
    7273 }
    7274 
    7275 VkDeviceSize VmaAllocation_T::GetOffset() const
    7276 {
    7277  switch(m_Type)
    7278  {
    7279  case ALLOCATION_TYPE_BLOCK:
    7280  return m_BlockAllocation.m_Offset;
    7281  case ALLOCATION_TYPE_DEDICATED:
    7282  return 0;
    7283  default:
    7284  VMA_ASSERT(0);
    7285  return 0;
    7286  }
    7287 }
    7288 
    7289 VkDeviceMemory VmaAllocation_T::GetMemory() const
    7290 {
    7291  switch(m_Type)
    7292  {
    7293  case ALLOCATION_TYPE_BLOCK:
    7294  return m_BlockAllocation.m_Block->GetDeviceMemory();
    7295  case ALLOCATION_TYPE_DEDICATED:
    7296  return m_DedicatedAllocation.m_hMemory;
    7297  default:
    7298  VMA_ASSERT(0);
    7299  return VK_NULL_HANDLE;
    7300  }
    7301 }
    7302 
    7303 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
    7304 {
    7305  switch(m_Type)
    7306  {
    7307  case ALLOCATION_TYPE_BLOCK:
    7308  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
    7309  case ALLOCATION_TYPE_DEDICATED:
    7310  return m_DedicatedAllocation.m_MemoryTypeIndex;
    7311  default:
    7312  VMA_ASSERT(0);
    7313  return UINT32_MAX;
    7314  }
    7315 }
    7316 
    7317 void* VmaAllocation_T::GetMappedData() const
    7318 {
    7319  switch(m_Type)
    7320  {
    7321  case ALLOCATION_TYPE_BLOCK:
    7322  if(m_MapCount != 0)
    7323  {
    7324  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
    7325  VMA_ASSERT(pBlockData != VMA_NULL);
    7326  return (char*)pBlockData + m_BlockAllocation.m_Offset;
    7327  }
    7328  else
    7329  {
    7330  return VMA_NULL;
    7331  }
    7332  break;
    7333  case ALLOCATION_TYPE_DEDICATED:
    7334  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
    7335  return m_DedicatedAllocation.m_pMappedData;
    7336  default:
    7337  VMA_ASSERT(0);
    7338  return VMA_NULL;
    7339  }
    7340 }
    7341 
    7342 bool VmaAllocation_T::CanBecomeLost() const
    7343 {
    7344  switch(m_Type)
    7345  {
    7346  case ALLOCATION_TYPE_BLOCK:
    7347  return m_BlockAllocation.m_CanBecomeLost;
    7348  case ALLOCATION_TYPE_DEDICATED:
    7349  return false;
    7350  default:
    7351  VMA_ASSERT(0);
    7352  return false;
    7353  }
    7354 }
    7355 
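// The loop below is lock-free: CompareExchangeLastUseFrameIndex updates the
// local copy on failure, so the function keeps retrying until it either
// observes a use too recent to discard (and gives up) or wins the race to
// mark the allocation VMA_FRAME_INDEX_LOST.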
    7356 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    7357 {
    7358  VMA_ASSERT(CanBecomeLost());
    7359 
    7360  /*
    7361  Warning: This is a carefully designed algorithm.
    7362  Do not modify unless you really know what you're doing :)
    7363  */
    7364  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
    7365  for(;;)
    7366  {
    7367  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
    7368  {
    7369  VMA_ASSERT(0);
    7370  return false;
    7371  }
    7372  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
    7373  {
    7374  return false;
    7375  }
    7376  else // Last use time earlier than current time.
    7377  {
    7378  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
    7379  {
    7380  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
    7381  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
    7382  return true;
    7383  }
    7384  }
    7385  }
    7386 }
    7387 
    7388 #if VMA_STATS_STRING_ENABLED
    7389 
    7390 // These correspond to the values of enum VmaSuballocationType.
    7391 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    7392  "FREE",
    7393  "UNKNOWN",
    7394  "BUFFER",
    7395  "IMAGE_UNKNOWN",
    7396  "IMAGE_LINEAR",
    7397  "IMAGE_OPTIMAL",
    7398 };
    7399 
    7400 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
    7401 {
    7402  json.WriteString("Type");
    7403  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
    7404 
    7405  json.WriteString("Size");
    7406  json.WriteNumber(m_Size);
    7407 
    7408  if(m_pUserData != VMA_NULL)
    7409  {
    7410  json.WriteString("UserData");
    7411  if(IsUserDataString())
    7412  {
    7413  json.WriteString((const char*)m_pUserData);
    7414  }
    7415  else
    7416  {
    7417  json.BeginString();
    7418  json.ContinueString_Pointer(m_pUserData);
    7419  json.EndString();
    7420  }
    7421  }
    7422 
    7423  json.WriteString("CreationFrameIndex");
    7424  json.WriteNumber(m_CreationFrameIndex);
    7425 
    7426  json.WriteString("LastUseFrameIndex");
    7427  json.WriteNumber(GetLastUseFrameIndex());
    7428 
    7429  if(m_BufferImageUsage != 0)
    7430  {
    7431  json.WriteString("Usage");
    7432  json.WriteNumber(m_BufferImageUsage);
    7433  }
    7434 }
    7435 
    7436 #endif
    7437 
    7438 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
    7439 {
    7440  VMA_ASSERT(IsUserDataString());
    7441  if(m_pUserData != VMA_NULL)
    7442  {
    7443  char* const oldStr = (char*)m_pUserData;
    7444  const size_t oldStrLen = strlen(oldStr);
    7445  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
    7446  m_pUserData = VMA_NULL;
    7447  }
    7448 }
    7449 
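// Note: the low 7 bits of m_MapCount hold the mapping reference count (hence
// the 0x7F limit below), while the top bit, MAP_COUNT_FLAG_PERSISTENT_MAP,
// marks allocations created with VMA_ALLOCATION_CREATE_MAPPED_BIT.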
    7450 void VmaAllocation_T::BlockAllocMap()
    7451 {
    7452  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    7453 
    7454  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    7455  {
    7456  ++m_MapCount;
    7457  }
    7458  else
    7459  {
    7460  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
    7461  }
    7462 }
    7463 
    7464 void VmaAllocation_T::BlockAllocUnmap()
    7465 {
    7466  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    7467 
    7468  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    7469  {
    7470  --m_MapCount;
    7471  }
    7472  else
    7473  {
    7474  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
    7475  }
    7476 }
    7477 
    7478 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
    7479 {
    7480  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
    7481 
    7482  if(m_MapCount != 0)
    7483  {
    7484  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    7485  {
    7486  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
    7487  *ppData = m_DedicatedAllocation.m_pMappedData;
    7488  ++m_MapCount;
    7489  return VK_SUCCESS;
    7490  }
    7491  else
    7492  {
    7493  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
    7494  return VK_ERROR_MEMORY_MAP_FAILED;
    7495  }
    7496  }
    7497  else
    7498  {
    7499  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
    7500  hAllocator->m_hDevice,
    7501  m_DedicatedAllocation.m_hMemory,
    7502  0, // offset
    7503  VK_WHOLE_SIZE,
    7504  0, // flags
    7505  ppData);
    7506  if(result == VK_SUCCESS)
    7507  {
    7508  m_DedicatedAllocation.m_pMappedData = *ppData;
    7509  m_MapCount = 1;
    7510  }
    7511  return result;
    7512  }
    7513 }
    7514 
    7515 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
    7516 {
    7517  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
    7518 
    7519  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    7520  {
    7521  --m_MapCount;
    7522  if(m_MapCount == 0)
    7523  {
    7524  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
    7525  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
    7526  hAllocator->m_hDevice,
    7527  m_DedicatedAllocation.m_hMemory);
    7528  }
    7529  }
    7530  else
    7531  {
    7532  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
    7533  }
    7534 }
    7535 
    7536 #if VMA_STATS_STRING_ENABLED
    7537 
    7538 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
    7539 {
    7540  json.BeginObject();
    7541 
    7542  json.WriteString("Blocks");
    7543  json.WriteNumber(stat.blockCount);
    7544 
    7545  json.WriteString("Allocations");
    7546  json.WriteNumber(stat.allocationCount);
    7547 
    7548  json.WriteString("UnusedRanges");
    7549  json.WriteNumber(stat.unusedRangeCount);
    7550 
    7551  json.WriteString("UsedBytes");
    7552  json.WriteNumber(stat.usedBytes);
    7553 
    7554  json.WriteString("UnusedBytes");
    7555  json.WriteNumber(stat.unusedBytes);
    7556 
    7557  if(stat.allocationCount > 1)
    7558  {
    7559  json.WriteString("AllocationSize");
    7560  json.BeginObject(true);
    7561  json.WriteString("Min");
    7562  json.WriteNumber(stat.allocationSizeMin);
    7563  json.WriteString("Avg");
    7564  json.WriteNumber(stat.allocationSizeAvg);
    7565  json.WriteString("Max");
    7566  json.WriteNumber(stat.allocationSizeMax);
    7567  json.EndObject();
    7568  }
    7569 
    7570  if(stat.unusedRangeCount > 1)
    7571  {
    7572  json.WriteString("UnusedRangeSize");
    7573  json.BeginObject(true);
    7574  json.WriteString("Min");
    7575  json.WriteNumber(stat.unusedRangeSizeMin);
    7576  json.WriteString("Avg");
    7577  json.WriteNumber(stat.unusedRangeSizeAvg);
    7578  json.WriteString("Max");
    7579  json.WriteNumber(stat.unusedRangeSizeMax);
    7580  json.EndObject();
    7581  }
    7582 
    7583  json.EndObject();
    7584 }
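// For reference, VmaPrintStatInfo() emits an object of roughly this shape
// (values illustrative only; the "AllocationSize"/"UnusedRangeSize" summaries
// appear only when more than one such range exists):
//
//   { "Blocks": 1, "Allocations": 2, "UnusedRanges": 2,
//     "UsedBytes": 1310720, "UnusedBytes": 65536,
//     "AllocationSize": { "Min": 262144, "Avg": 655360, "Max": 1048576 },
//     "UnusedRangeSize": { "Min": 4096, "Avg": 32768, "Max": 61440 } }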
    7585 
    7586 #endif // #if VMA_STATS_STRING_ENABLED
    7587 
    7588 struct VmaSuballocationItemSizeLess
    7589 {
    7590  bool operator()(
    7591  const VmaSuballocationList::iterator lhs,
    7592  const VmaSuballocationList::iterator rhs) const
    7593  {
    7594  return lhs->size < rhs->size;
    7595  }
    7596  bool operator()(
    7597  const VmaSuballocationList::iterator lhs,
    7598  VkDeviceSize rhsSize) const
    7599  {
    7600  return lhs->size < rhsSize;
    7601  }
    7602 };
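// The second operator() overload makes this comparator heterogeneous: it can
// compare a stored list iterator against a plain VkDeviceSize key, which is
// what lets VmaBinaryFindFirstNotLess() search m_FreeSuballocationsBySize by
// size without constructing a dummy suballocation, e.g. (illustrative):
//
//   VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
//       m_FreeSuballocationsBySize.data(),
//       m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
//       requiredSize, // plain VkDeviceSize key
//       VmaSuballocationItemSizeLess());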
    7603 
    7604 
    7605 ////////////////////////////////////////////////////////////////////////////////
    7606 // class VmaBlockMetadata
    7607 
    7607 
    7608 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
    7609  m_Size(0),
    7610  m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
    7611 {
    7612 }
    7613 
    7614 #if VMA_STATS_STRING_ENABLED
    7615 
    7616 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
    7617  VkDeviceSize unusedBytes,
    7618  size_t allocationCount,
    7619  size_t unusedRangeCount) const
    7620 {
    7621  json.BeginObject();
    7622 
    7623  json.WriteString("TotalBytes");
    7624  json.WriteNumber(GetSize());
    7625 
    7626  json.WriteString("UnusedBytes");
    7627  json.WriteNumber(unusedBytes);
    7628 
    7629  json.WriteString("Allocations");
    7630  json.WriteNumber((uint64_t)allocationCount);
    7631 
    7632  json.WriteString("UnusedRanges");
    7633  json.WriteNumber((uint64_t)unusedRangeCount);
    7634 
    7635  json.WriteString("Suballocations");
    7636  json.BeginArray();
    7637 }
    7638 
    7639 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
    7640  VkDeviceSize offset,
    7641  VmaAllocation hAllocation) const
    7642 {
    7643  json.BeginObject(true);
    7644 
    7645  json.WriteString("Offset");
    7646  json.WriteNumber(offset);
    7647 
    7648  hAllocation->PrintParameters(json);
    7649 
    7650  json.EndObject();
    7651 }
    7652 
    7653 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
    7654  VkDeviceSize offset,
    7655  VkDeviceSize size) const
    7656 {
    7657  json.BeginObject(true);
    7658 
    7659  json.WriteString("Offset");
    7660  json.WriteNumber(offset);
    7661 
    7662  json.WriteString("Type");
    7663  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
    7664 
    7665  json.WriteString("Size");
    7666  json.WriteNumber(size);
    7667 
    7668  json.EndObject();
    7669 }
    7670 
    7671 void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
    7672 {
    7673  json.EndArray();
    7674  json.EndObject();
    7675 }
    7676 
    7677 #endif // #if VMA_STATS_STRING_ENABLED
    7678 
    7679 ////////////////////////////////////////////////////////////////////////////////
    7680 // class VmaBlockMetadata_Generic
    7681 
    7681 
    7682 VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
    7683  VmaBlockMetadata(hAllocator),
    7684  m_FreeCount(0),
    7685  m_SumFreeSize(0),
    7686  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    7687  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
    7688 {
    7689 }
    7690 
    7691 VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
    7692 {
    7693 }
    7694 
    7695 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
    7696 {
    7697  VmaBlockMetadata::Init(size);
    7698 
    7699  m_FreeCount = 1;
    7700  m_SumFreeSize = size;
    7701 
    7702  VmaSuballocation suballoc = {};
    7703  suballoc.offset = 0;
    7704  suballoc.size = size;
    7705  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    7706  suballoc.hAllocation = VK_NULL_HANDLE;
    7707 
    7708  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    7709  m_Suballocations.push_back(suballoc);
    7710  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
    7711  --suballocItem;
    7712  m_FreeSuballocationsBySize.push_back(suballocItem);
    7713 }
    7714 
    7715 bool VmaBlockMetadata_Generic::Validate() const
    7716 {
    7717  VMA_VALIDATE(!m_Suballocations.empty());
    7718 
    7719  // Expected offset of new suballocation as calculated from previous ones.
    7720  VkDeviceSize calculatedOffset = 0;
    7721  // Expected number of free suballocations as calculated from traversing their list.
    7722  uint32_t calculatedFreeCount = 0;
    7723  // Expected sum size of free suballocations as calculated from traversing their list.
    7724  VkDeviceSize calculatedSumFreeSize = 0;
    7725  // Expected number of free suballocations that should be registered in
    7726  // m_FreeSuballocationsBySize calculated from traversing their list.
    7727  size_t freeSuballocationsToRegister = 0;
    7728  // True if previous visited suballocation was free.
    7729  bool prevFree = false;
    7730 
    7731  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    7732  suballocItem != m_Suballocations.cend();
    7733  ++suballocItem)
    7734  {
    7735  const VmaSuballocation& subAlloc = *suballocItem;
    7736 
    7737  // Actual offset of this suballocation doesn't match expected one.
    7738  VMA_VALIDATE(subAlloc.offset == calculatedOffset);
    7739 
    7740  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
    7741  // Two adjacent free suballocations are invalid. They should be merged.
    7742  VMA_VALIDATE(!prevFree || !currFree);
    7743 
    7744  VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
    7745 
    7746  if(currFree)
    7747  {
    7748  calculatedSumFreeSize += subAlloc.size;
    7749  ++calculatedFreeCount;
    7750  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    7751  {
    7752  ++freeSuballocationsToRegister;
    7753  }
    7754 
    7755  // Margin required between allocations - every free space must be at least that large.
    7756  VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
    7757  }
    7758  else
    7759  {
    7760  VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
    7761  VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
    7762 
    7763  // Margin required between allocations - previous allocation must be free.
    7764  VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
    7765  }
    7766 
    7767  calculatedOffset += subAlloc.size;
    7768  prevFree = currFree;
    7769  }
    7770 
    7771  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    7772  // match expected one.
    7773  VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
    7774 
    7775  VkDeviceSize lastSize = 0;
    7776  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    7777  {
    7778  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
    7779 
    7780  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
    7781  VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
    7782  // They must be sorted by size ascending.
    7783  VMA_VALIDATE(suballocItem->size >= lastSize);
    7784 
    7785  lastSize = suballocItem->size;
    7786  }
    7787 
    7788  // Check if totals match the calculated values.
    7789  VMA_VALIDATE(ValidateFreeSuballocationList());
    7790  VMA_VALIDATE(calculatedOffset == GetSize());
    7791  VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
    7792  VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
    7793 
    7794  return true;
    7795 }
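// In short, Validate() enforces these invariants: suballocations cover the
// whole block contiguously, no two adjacent suballocations are both free,
// m_FreeSuballocationsBySize holds exactly the free ranges of registrable
// size sorted by ascending size, and the cached m_FreeCount / m_SumFreeSize
// totals match a full traversal.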
    7796 
    7797 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
    7798 {
    7799  if(!m_FreeSuballocationsBySize.empty())
    7800  {
    7801  return m_FreeSuballocationsBySize.back()->size;
    7802  }
    7803  else
    7804  {
    7805  return 0;
    7806  }
    7807 }
    7808 
    7809 bool VmaBlockMetadata_Generic::IsEmpty() const
    7810 {
    7811  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
    7812 }
    7813 
    7814 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    7815 {
    7816  outInfo.blockCount = 1;
    7817 
    7818  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    7819  outInfo.allocationCount = rangeCount - m_FreeCount;
    7820  outInfo.unusedRangeCount = m_FreeCount;
    7821 
    7822  outInfo.unusedBytes = m_SumFreeSize;
    7823  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
    7824 
    7825  outInfo.allocationSizeMin = UINT64_MAX;
    7826  outInfo.allocationSizeMax = 0;
    7827  outInfo.unusedRangeSizeMin = UINT64_MAX;
    7828  outInfo.unusedRangeSizeMax = 0;
    7829 
    7830  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    7831  suballocItem != m_Suballocations.cend();
    7832  ++suballocItem)
    7833  {
    7834  const VmaSuballocation& suballoc = *suballocItem;
    7835  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    7836  {
    7837  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    7838  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
    7839  }
    7840  else
    7841  {
    7842  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
    7843  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
    7844  }
    7845  }
    7846 }
    7847 
    7848 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
    7849 {
    7850  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    7851 
    7852  inoutStats.size += GetSize();
    7853  inoutStats.unusedSize += m_SumFreeSize;
    7854  inoutStats.allocationCount += rangeCount - m_FreeCount;
    7855  inoutStats.unusedRangeCount += m_FreeCount;
    7856  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    7857 }
    7858 
    7859 #if VMA_STATS_STRING_ENABLED
    7860 
    7861 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
    7862 {
    7863  PrintDetailedMap_Begin(json,
    7864  m_SumFreeSize, // unusedBytes
    7865  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
    7866  m_FreeCount); // unusedRangeCount
    7867 
    7868  size_t i = 0;
    7869  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    7870  suballocItem != m_Suballocations.cend();
    7871  ++suballocItem, ++i)
    7872  {
    7873  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    7874  {
    7875  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
    7876  }
    7877  else
    7878  {
    7879  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
    7880  }
    7881  }
    7882 
    7883  PrintDetailedMap_End(json);
    7884 }
    7885 
    7886 #endif // #if VMA_STATS_STRING_ENABLED
    7887 
    7888 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
    7889  uint32_t currentFrameIndex,
    7890  uint32_t frameInUseCount,
    7891  VkDeviceSize bufferImageGranularity,
    7892  VkDeviceSize allocSize,
    7893  VkDeviceSize allocAlignment,
    7894  bool upperAddress,
    7895  VmaSuballocationType allocType,
    7896  bool canMakeOtherLost,
    7897  uint32_t strategy,
    7898  VmaAllocationRequest* pAllocationRequest)
    7899 {
    7900  VMA_ASSERT(allocSize > 0);
    7901  VMA_ASSERT(!upperAddress);
    7902  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    7903  VMA_ASSERT(pAllocationRequest != VMA_NULL);
    7904  VMA_HEAVY_ASSERT(Validate());
    7905 
    7906  pAllocationRequest->type = VmaAllocationRequestType::Normal;
    7907 
    7908  // There is not enough total free space in this block to fulfill the request: Early return.
    7909  if(canMakeOtherLost == false &&
    7910  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
    7911  {
    7912  return false;
    7913  }
    7914 
    7915  // New algorithm, efficiently searching freeSuballocationsBySize.
    7916  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
    7917  if(freeSuballocCount > 0)
    7918  {
    7919  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
    7920  {
    7921  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
    7922  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    7923  m_FreeSuballocationsBySize.data(),
    7924  m_FreeSuballocationsBySize.data() + freeSuballocCount,
    7925  allocSize + 2 * VMA_DEBUG_MARGIN,
    7926  VmaSuballocationItemSizeLess());
    7927  size_t index = it - m_FreeSuballocationsBySize.data();
    7928  for(; index < freeSuballocCount; ++index)
    7929  {
    7930  if(CheckAllocation(
    7931  currentFrameIndex,
    7932  frameInUseCount,
    7933  bufferImageGranularity,
    7934  allocSize,
    7935  allocAlignment,
    7936  allocType,
    7937  m_FreeSuballocationsBySize[index],
    7938  false, // canMakeOtherLost
    7939  &pAllocationRequest->offset,
    7940  &pAllocationRequest->itemsToMakeLostCount,
    7941  &pAllocationRequest->sumFreeSize,
    7942  &pAllocationRequest->sumItemSize))
    7943  {
    7944  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    7945  return true;
    7946  }
    7947  }
    7948  }
    7949  else if(strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
    7950  {
    7951  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    7952  it != m_Suballocations.end();
    7953  ++it)
    7954  {
    7955  if(it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
    7956  currentFrameIndex,
    7957  frameInUseCount,
    7958  bufferImageGranularity,
    7959  allocSize,
    7960  allocAlignment,
    7961  allocType,
    7962  it,
    7963  false, // canMakeOtherLost
    7964  &pAllocationRequest->offset,
    7965  &pAllocationRequest->itemsToMakeLostCount,
    7966  &pAllocationRequest->sumFreeSize,
    7967  &pAllocationRequest->sumItemSize))
    7968  {
    7969  pAllocationRequest->item = it;
    7970  return true;
    7971  }
    7972  }
    7973  }
    7974  else // WORST_FIT, FIRST_FIT
    7975  {
    7976  // Search starting from the biggest suballocations.
    7977  for(size_t index = freeSuballocCount; index--; )
    7978  {
    7979  if(CheckAllocation(
    7980  currentFrameIndex,
    7981  frameInUseCount,
    7982  bufferImageGranularity,
    7983  allocSize,
    7984  allocAlignment,
    7985  allocType,
    7986  m_FreeSuballocationsBySize[index],
    7987  false, // canMakeOtherLost
    7988  &pAllocationRequest->offset,
    7989  &pAllocationRequest->itemsToMakeLostCount,
    7990  &pAllocationRequest->sumFreeSize,
    7991  &pAllocationRequest->sumItemSize))
    7992  {
    7993  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    7994  return true;
    7995  }
    7996  }
    7997  }
    7998  }
    7999 
    8000  if(canMakeOtherLost)
    8001  {
    8002  // Brute-force algorithm. TODO: Come up with something better.
    8003 
    8004  bool found = false;
    8005  VmaAllocationRequest tmpAllocRequest = {};
    8006  tmpAllocRequest.type = VmaAllocationRequestType::Normal;
    8007  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
    8008  suballocIt != m_Suballocations.end();
    8009  ++suballocIt)
    8010  {
    8011  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
    8012  suballocIt->hAllocation->CanBecomeLost())
    8013  {
    8014  if(CheckAllocation(
    8015  currentFrameIndex,
    8016  frameInUseCount,
    8017  bufferImageGranularity,
    8018  allocSize,
    8019  allocAlignment,
    8020  allocType,
    8021  suballocIt,
    8022  canMakeOtherLost,
    8023  &tmpAllocRequest.offset,
    8024  &tmpAllocRequest.itemsToMakeLostCount,
    8025  &tmpAllocRequest.sumFreeSize,
    8026  &tmpAllocRequest.sumItemSize))
    8027  {
    8028  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
    8029  {
    8030  *pAllocationRequest = tmpAllocRequest;
    8031  pAllocationRequest->item = suballocIt;
    8032  break;
    8033  }
    8034  if(!found || tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
    8035  {
    8036  *pAllocationRequest = tmpAllocRequest;
    8037  pAllocationRequest->item = suballocIt;
    8038  found = true;
    8039  }
    8040  }
    8041  }
    8042  }
    8043 
    8044  return found;
    8045  }
    8046 
    8047  return false;
    8048 }
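// Summary of the dispatch above: BEST_FIT binary-searches the size-sorted
// vector for the first free range >= allocSize + 2 * VMA_DEBUG_MARGIN and
// scans upward from there; MIN_OFFSET (internal, used by defragmentation)
// walks m_Suballocations in address order and takes the first fit;
// WORST_FIT/FIRST_FIT iterate the size-sorted vector from the largest range
// downward. Only when canMakeOtherLost is set does the brute-force pass run,
// comparing candidates by CalcCost() unless FIRST_FIT allows an early break.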
    8049 
    8050 bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
    8051  uint32_t currentFrameIndex,
    8052  uint32_t frameInUseCount,
    8053  VmaAllocationRequest* pAllocationRequest)
    8054 {
    8055  VMA_ASSERT(pAllocationRequest && pAllocationRequest->type == VmaAllocationRequestType::Normal);
    8056 
    8057  while(pAllocationRequest->itemsToMakeLostCount > 0)
    8058  {
    8059  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
    8060  {
    8061  ++pAllocationRequest->item;
    8062  }
    8063  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    8064  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
    8065  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
    8066  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    8067  {
    8068  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
    8069  --pAllocationRequest->itemsToMakeLostCount;
    8070  }
    8071  else
    8072  {
    8073  return false;
    8074  }
    8075  }
    8076 
    8077  VMA_HEAVY_ASSERT(Validate());
    8078  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    8079  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
    8080 
    8081  return true;
    8082 }
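// "Lost" allocations are VMA's opt-in reclamation mechanism: an allocation
// created with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT may be invalidated
// here when it has not been used during the last frameInUseCount frames
// (see the GetLastUseFrameIndex() checks in CheckAllocation() below), which
// turns its range back into free space for the pending request.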
    8083 
    8084 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    8085 {
    8086  uint32_t lostAllocationCount = 0;
    8087  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    8088  it != m_Suballocations.end();
    8089  ++it)
    8090  {
    8091  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
    8092  it->hAllocation->CanBecomeLost() &&
    8093  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    8094  {
    8095  it = FreeSuballocation(it);
    8096  ++lostAllocationCount;
    8097  }
    8098  }
    8099  return lostAllocationCount;
    8100 }
    8101 
    8102 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
    8103 {
    8104  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    8105  it != m_Suballocations.end();
    8106  ++it)
    8107  {
    8108  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
    8109  {
    8110  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
    8111  {
    8112  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    8113  return VK_ERROR_VALIDATION_FAILED_EXT;
    8114  }
    8115  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
    8116  {
    8117  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    8118  return VK_ERROR_VALIDATION_FAILED_EXT;
    8119  }
    8120  }
    8121  }
    8122 
    8123  return VK_SUCCESS;
    8124 }
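// CheckCorruption() relies on VMA_DEBUG_MARGIN > 0 together with
// VMA_DEBUG_DETECT_CORRUPTION: every allocation is then bracketed by margin
// bytes filled with a known magic pattern, and VmaValidateMagicValue()
// re-reads that pattern just before it->offset and just after
// it->offset + it->size, so an out-of-bounds write by the application shows
// up as a failed validation here.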
    8125 
    8126 void VmaBlockMetadata_Generic::Alloc(
    8127  const VmaAllocationRequest& request,
    8128  VmaSuballocationType type,
    8129  VkDeviceSize allocSize,
    8130  VmaAllocation hAllocation)
    8131 {
    8132  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
    8133  VMA_ASSERT(request.item != m_Suballocations.end());
    8134  VmaSuballocation& suballoc = *request.item;
    8135  // Given suballocation is a free block.
    8136  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    8137  // Given offset is inside this suballocation.
    8138  VMA_ASSERT(request.offset >= suballoc.offset);
    8139  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    8140  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    8141  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
    8142 
    8143  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    8144  // it to become used.
    8145  UnregisterFreeSuballocation(request.item);
    8146 
    8147  suballoc.offset = request.offset;
    8148  suballoc.size = allocSize;
    8149  suballoc.type = type;
    8150  suballoc.hAllocation = hAllocation;
    8151 
    8152  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    8153  if(paddingEnd)
    8154  {
    8155  VmaSuballocation paddingSuballoc = {};
    8156  paddingSuballoc.offset = request.offset + allocSize;
    8157  paddingSuballoc.size = paddingEnd;
    8158  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8159  VmaSuballocationList::iterator next = request.item;
    8160  ++next;
    8161  const VmaSuballocationList::iterator paddingEndItem =
    8162  m_Suballocations.insert(next, paddingSuballoc);
    8163  RegisterFreeSuballocation(paddingEndItem);
    8164  }
    8165 
    8166  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    8167  if(paddingBegin)
    8168  {
    8169  VmaSuballocation paddingSuballoc = {};
    8170  paddingSuballoc.offset = request.offset - paddingBegin;
    8171  paddingSuballoc.size = paddingBegin;
    8172  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8173  const VmaSuballocationList::iterator paddingBeginItem =
    8174  m_Suballocations.insert(request.item, paddingSuballoc);
    8175  RegisterFreeSuballocation(paddingBeginItem);
    8176  }
    8177 
    8178  // Update totals.
    8179  m_FreeCount = m_FreeCount - 1;
    8180  if(paddingBegin > 0)
    8181  {
    8182  ++m_FreeCount;
    8183  }
    8184  if(paddingEnd > 0)
    8185  {
    8186  ++m_FreeCount;
    8187  }
    8188  m_SumFreeSize -= allocSize;
    8189 }
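// Alloc() splits the chosen free range into up to three pieces. For a request
// placed at offset O with size S inside a free range [B, E):
//
//   before: B |------------------- free -------------------| E
//   after:  B |- free (paddingBegin) -|## used: O..O+S ##|- free (paddingEnd) -| E
//
// Each non-empty padding piece becomes a new free suballocation and is
// re-registered in m_FreeSuballocationsBySize, and m_FreeCount/m_SumFreeSize
// are updated accordingly.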
    8190 
    8191 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
    8192 {
    8193  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    8194  suballocItem != m_Suballocations.end();
    8195  ++suballocItem)
    8196  {
    8197  VmaSuballocation& suballoc = *suballocItem;
    8198  if(suballoc.hAllocation == allocation)
    8199  {
    8200  FreeSuballocation(suballocItem);
    8201  VMA_HEAVY_ASSERT(Validate());
    8202  return;
    8203  }
    8204  }
    8205  VMA_ASSERT(0 && "Not found!");
    8206 }
    8207 
    8208 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
    8209 {
    8210  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    8211  suballocItem != m_Suballocations.end();
    8212  ++suballocItem)
    8213  {
    8214  VmaSuballocation& suballoc = *suballocItem;
    8215  if(suballoc.offset == offset)
    8216  {
    8217  FreeSuballocation(suballocItem);
    8218  return;
    8219  }
    8220  }
    8221  VMA_ASSERT(0 && "Not found!");
    8222 }
    8223 
    8224 bool VmaBlockMetadata_Generic::ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize)
    8225 {
    8226  typedef VmaSuballocationList::iterator iter_type;
    8227  for(iter_type suballocItem = m_Suballocations.begin();
    8228  suballocItem != m_Suballocations.end();
    8229  ++suballocItem)
    8230  {
    8231  VmaSuballocation& suballoc = *suballocItem;
    8232  if(suballoc.hAllocation == alloc)
    8233  {
    8234  iter_type nextItem = suballocItem;
    8235  ++nextItem;
    8236 
    8237  // Should have been ensured at a higher level.
    8238  VMA_ASSERT(newSize != alloc->GetSize() && newSize > 0);
    8239 
    8240  // Shrinking.
    8241  if(newSize < alloc->GetSize())
    8242  {
    8243  const VkDeviceSize sizeDiff = suballoc.size - newSize;
    8244 
    8245  // There is next item.
    8246  if(nextItem != m_Suballocations.end())
    8247  {
    8248  // Next item is free.
    8249  if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    8250  {
    8251  // Grow this next item backward.
    8252  UnregisterFreeSuballocation(nextItem);
    8253  nextItem->offset -= sizeDiff;
    8254  nextItem->size += sizeDiff;
    8255  RegisterFreeSuballocation(nextItem);
    8256  }
    8257  // Next item is not free.
    8258  else
    8259  {
    8260  // Create free item after current one.
    8261  VmaSuballocation newFreeSuballoc;
    8262  newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
    8263  newFreeSuballoc.offset = suballoc.offset + newSize;
    8264  newFreeSuballoc.size = sizeDiff;
    8265  newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8266  iter_type newFreeSuballocIt = m_Suballocations.insert(nextItem, newFreeSuballoc);
    8267  RegisterFreeSuballocation(newFreeSuballocIt);
    8268 
    8269  ++m_FreeCount;
    8270  }
    8271  }
    8272  // This is the last item.
    8273  else
    8274  {
    8275  // Create free item at the end.
    8276  VmaSuballocation newFreeSuballoc;
    8277  newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
    8278  newFreeSuballoc.offset = suballoc.offset + newSize;
    8279  newFreeSuballoc.size = sizeDiff;
    8280  newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8281  m_Suballocations.push_back(newFreeSuballoc);
    8282 
    8283  iter_type newFreeSuballocIt = m_Suballocations.end();
    8284  RegisterFreeSuballocation(--newFreeSuballocIt);
    8285 
    8286  ++m_FreeCount;
    8287  }
    8288 
    8289  suballoc.size = newSize;
    8290  m_SumFreeSize += sizeDiff;
    8291  }
    8292  // Growing.
    8293  else
    8294  {
    8295  const VkDeviceSize sizeDiff = newSize - suballoc.size;
    8296 
    8297  // There is next item.
    8298  if(nextItem != m_Suballocations.end())
    8299  {
    8300  // Next item is free.
    8301  if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    8302  {
    8303  // There is not enough free space, including margin.
    8304  if(nextItem->size < sizeDiff + VMA_DEBUG_MARGIN)
    8305  {
    8306  return false;
    8307  }
    8308 
    8309  // There is more free space than required.
    8310  if(nextItem->size > sizeDiff)
    8311  {
    8312  // Move and shrink this next item.
    8313  UnregisterFreeSuballocation(nextItem);
    8314  nextItem->offset += sizeDiff;
    8315  nextItem->size -= sizeDiff;
    8316  RegisterFreeSuballocation(nextItem);
    8317  }
    8318  // There is exactly the amount of free space required.
    8319  else
    8320  {
    8321  // Remove this next free item.
    8322  UnregisterFreeSuballocation(nextItem);
    8323  m_Suballocations.erase(nextItem);
    8324  --m_FreeCount;
    8325  }
    8326  }
    8327  // Next item is not free - there is no space to grow.
    8328  else
    8329  {
    8330  return false;
    8331  }
    8332  }
    8333  // This is the last item - there is no space to grow.
    8334  else
    8335  {
    8336  return false;
    8337  }
    8338 
    8339  suballoc.size = newSize;
    8340  m_SumFreeSize -= sizeDiff;
    8341  }
    8342 
    8343  // We cannot call Validate() here because the alloc object is updated to its new size outside of this call.
    8344  return true;
    8345  }
    8346  }
    8347  VMA_ASSERT(0 && "Not found!");
    8348  return false;
    8349 }
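// ResizeAllocation() only succeeds in place: shrinking always works (the
// freed tail becomes a new free item or grows the following one), while
// growing requires the next suballocation to be free and large enough,
// including VMA_DEBUG_MARGIN; in every other case it returns false and the
// caller must reallocate instead.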
    8350 
    8351 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
    8352 {
    8353  VkDeviceSize lastSize = 0;
    8354  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
    8355  {
    8356  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
    8357 
    8358  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
    8359  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    8360  VMA_VALIDATE(it->size >= lastSize);
    8361  lastSize = it->size;
    8362  }
    8363  return true;
    8364 }
    8365 
    8366 bool VmaBlockMetadata_Generic::CheckAllocation(
    8367  uint32_t currentFrameIndex,
    8368  uint32_t frameInUseCount,
    8369  VkDeviceSize bufferImageGranularity,
    8370  VkDeviceSize allocSize,
    8371  VkDeviceSize allocAlignment,
    8372  VmaSuballocationType allocType,
    8373  VmaSuballocationList::const_iterator suballocItem,
    8374  bool canMakeOtherLost,
    8375  VkDeviceSize* pOffset,
    8376  size_t* itemsToMakeLostCount,
    8377  VkDeviceSize* pSumFreeSize,
    8378  VkDeviceSize* pSumItemSize) const
    8379 {
    8380  VMA_ASSERT(allocSize > 0);
    8381  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    8382  VMA_ASSERT(suballocItem != m_Suballocations.cend());
    8383  VMA_ASSERT(pOffset != VMA_NULL);
    8384 
    8385  *itemsToMakeLostCount = 0;
    8386  *pSumFreeSize = 0;
    8387  *pSumItemSize = 0;
    8388 
    8389  if(canMakeOtherLost)
    8390  {
    8391  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    8392  {
    8393  *pSumFreeSize = suballocItem->size;
    8394  }
    8395  else
    8396  {
    8397  if(suballocItem->hAllocation->CanBecomeLost() &&
    8398  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    8399  {
    8400  ++*itemsToMakeLostCount;
    8401  *pSumItemSize = suballocItem->size;
    8402  }
    8403  else
    8404  {
    8405  return false;
    8406  }
    8407  }
    8408 
    8409  // Remaining size is too small for this request: Early return.
    8410  if(GetSize() - suballocItem->offset < allocSize)
    8411  {
    8412  return false;
    8413  }
    8414 
    8415  // Start from offset equal to beginning of this suballocation.
    8416  *pOffset = suballocItem->offset;
    8417 
    8418  // Apply VMA_DEBUG_MARGIN at the beginning.
    8419  if(VMA_DEBUG_MARGIN > 0)
    8420  {
    8421  *pOffset += VMA_DEBUG_MARGIN;
    8422  }
    8423 
    8424  // Apply alignment.
    8425  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
    8426 
    8427  // Check previous suballocations for BufferImageGranularity conflicts.
    8428  // Make bigger alignment if necessary.
    8429  if(bufferImageGranularity > 1)
    8430  {
    8431  bool bufferImageGranularityConflict = false;
    8432  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
    8433  while(prevSuballocItem != m_Suballocations.cbegin())
    8434  {
    8435  --prevSuballocItem;
    8436  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
    8437  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
    8438  {
    8439  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    8440  {
    8441  bufferImageGranularityConflict = true;
    8442  break;
    8443  }
    8444  }
    8445  else
    8446  // Already on previous page.
    8447  break;
    8448  }
    8449  if(bufferImageGranularityConflict)
    8450  {
    8451  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
    8452  }
    8453  }
    8454 
    8455  // Now that we have final *pOffset, check if we are past suballocItem.
    8456  // If yes, return false - this function should be called for another suballocItem as starting point.
    8457  if(*pOffset >= suballocItem->offset + suballocItem->size)
    8458  {
    8459  return false;
    8460  }
    8461 
    8462  // Calculate padding at the beginning based on current offset.
    8463  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
    8464 
    8465  // Calculate required margin at the end.
    8466  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
    8467 
    8468  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
    8469  // Another early return check.
    8470  if(suballocItem->offset + totalSize > GetSize())
    8471  {
    8472  return false;
    8473  }
    8474 
    8475  // Advance lastSuballocItem until desired size is reached.
    8476  // Update itemsToMakeLostCount.
    8477  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
    8478  if(totalSize > suballocItem->size)
    8479  {
    8480  VkDeviceSize remainingSize = totalSize - suballocItem->size;
    8481  while(remainingSize > 0)
    8482  {
    8483  ++lastSuballocItem;
    8484  if(lastSuballocItem == m_Suballocations.cend())
    8485  {
    8486  return false;
    8487  }
    8488  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    8489  {
    8490  *pSumFreeSize += lastSuballocItem->size;
    8491  }
    8492  else
    8493  {
    8494  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
    8495  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
    8496  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    8497  {
    8498  ++*itemsToMakeLostCount;
    8499  *pSumItemSize += lastSuballocItem->size;
    8500  }
    8501  else
    8502  {
    8503  return false;
    8504  }
    8505  }
    8506  remainingSize = (lastSuballocItem->size < remainingSize) ?
    8507  remainingSize - lastSuballocItem->size : 0;
    8508  }
    8509  }
    8510 
    8511  // Check next suballocations for BufferImageGranularity conflicts.
    8512  // If conflict exists, we must mark more allocations lost or fail.
    8513  if(bufferImageGranularity > 1)
    8514  {
    8515  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
    8516  ++nextSuballocItem;
    8517  while(nextSuballocItem != m_Suballocations.cend())
    8518  {
    8519  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
    8520  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    8521  {
    8522  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    8523  {
    8524  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
    8525  if(nextSuballoc.hAllocation->CanBecomeLost() &&
    8526  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    8527  {
    8528  ++*itemsToMakeLostCount;
    8529  }
    8530  else
    8531  {
    8532  return false;
    8533  }
    8534  }
    8535  }
    8536  else
    8537  {
    8538  // Already on next page.
    8539  break;
    8540  }
    8541  ++nextSuballocItem;
    8542  }
    8543  }
    8544  }
    8545  else
    8546  {
    8547  const VmaSuballocation& suballoc = *suballocItem;
    8548  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    8549 
    8550  *pSumFreeSize = suballoc.size;
    8551 
    8552  // Size of this suballocation is too small for this request: Early return.
    8553  if(suballoc.size < allocSize)
    8554  {
    8555  return false;
    8556  }
    8557 
    8558  // Start from offset equal to beginning of this suballocation.
    8559  *pOffset = suballoc.offset;
    8560 
    8561  // Apply VMA_DEBUG_MARGIN at the beginning.
    8562  if(VMA_DEBUG_MARGIN > 0)
    8563  {
    8564  *pOffset += VMA_DEBUG_MARGIN;
    8565  }
    8566 
    8567  // Apply alignment.
    8568  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
    8569 
    8570  // Check previous suballocations for BufferImageGranularity conflicts.
    8571  // Make bigger alignment if necessary.
    8572  if(bufferImageGranularity > 1)
    8573  {
    8574  bool bufferImageGranularityConflict = false;
    8575  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
    8576  while(prevSuballocItem != m_Suballocations.cbegin())
    8577  {
    8578  --prevSuballocItem;
    8579  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
    8580  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
    8581  {
    8582  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    8583  {
    8584  bufferImageGranularityConflict = true;
    8585  break;
    8586  }
    8587  }
    8588  else
    8589  // Already on previous page.
    8590  break;
    8591  }
    8592  if(bufferImageGranularityConflict)
    8593  {
    8594  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
    8595  }
    8596  }
    8597 
    8598  // Calculate padding at the beginning based on current offset.
    8599  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
    8600 
    8601  // Calculate required margin at the end.
    8602  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
    8603 
    8604  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
    8605  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
    8606  {
    8607  return false;
    8608  }
    8609 
    8610  // Check next suballocations for BufferImageGranularity conflicts.
    8611  // If conflict exists, allocation cannot be made here.
    8612  if(bufferImageGranularity > 1)
    8613  {
    8614  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
    8615  ++nextSuballocItem;
    8616  while(nextSuballocItem != m_Suballocations.cend())
    8617  {
    8618  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
    8619  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    8620  {
    8621  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    8622  {
    8623  return false;
    8624  }
    8625  }
    8626  else
    8627  {
    8628  // Already on next page.
    8629  break;
    8630  }
    8631  ++nextSuballocItem;
    8632  }
    8633  }
    8634  }
    8635 
    8636  // All tests passed: Success. pOffset is already filled.
    8637  return true;
    8638 }
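// Example of the bufferImageGranularity logic above, with granularity = 1024:
// a buffer whose last byte is at offset 999 occupies page 0, i.e. [0, 1024).
// If an optimally tiled image were placed at offset 512, it would start on the
// same page and conflict, so *pOffset is aligned up to 1024, the next page
// boundary. The same check runs forward: a following image on the page where
// the new allocation ends either becomes lost (if allowed) or fails the request.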
    8639 
    8640 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
    8641 {
    8642  VMA_ASSERT(item != m_Suballocations.end());
    8643  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    8644 
    8645  VmaSuballocationList::iterator nextItem = item;
    8646  ++nextItem;
    8647  VMA_ASSERT(nextItem != m_Suballocations.end());
    8648  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
    8649 
    8650  item->size += nextItem->size;
    8651  --m_FreeCount;
    8652  m_Suballocations.erase(nextItem);
    8653 }
    8654 
    8655 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
    8656 {
    8657  // Change this suballocation to be marked as free.
    8658  VmaSuballocation& suballoc = *suballocItem;
    8659  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8660  suballoc.hAllocation = VK_NULL_HANDLE;
    8661 
    8662  // Update totals.
    8663  ++m_FreeCount;
    8664  m_SumFreeSize += suballoc.size;
    8665 
    8666  // Merge with previous and/or next suballocation if it's also free.
    8667  bool mergeWithNext = false;
    8668  bool mergeWithPrev = false;
    8669 
    8670  VmaSuballocationList::iterator nextItem = suballocItem;
    8671  ++nextItem;
    8672  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    8673  {
    8674  mergeWithNext = true;
    8675  }
    8676 
    8677  VmaSuballocationList::iterator prevItem = suballocItem;
    8678  if(suballocItem != m_Suballocations.begin())
    8679  {
    8680  --prevItem;
    8681  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    8682  {
    8683  mergeWithPrev = true;
    8684  }
    8685  }
    8686 
    8687  if(mergeWithNext)
    8688  {
    8689  UnregisterFreeSuballocation(nextItem);
    8690  MergeFreeWithNext(suballocItem);
    8691  }
    8692 
    8693  if(mergeWithPrev)
    8694  {
    8695  UnregisterFreeSuballocation(prevItem);
    8696  MergeFreeWithNext(prevItem);
    8697  RegisterFreeSuballocation(prevItem);
    8698  return prevItem;
    8699  }
    8700  else
    8701  {
    8702  RegisterFreeSuballocation(suballocItem);
    8703  return suballocItem;
    8704  }
    8705 }
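// FreeSuballocation() handles the four possible neighbor layouts:
//
//   used | FREED | used  -> becomes a single free item
//   used | FREED | free  -> merged forward  (mergeWithNext)
//   free | FREED | used  -> merged backward (mergeWithPrev)
//   free | FREED | free  -> both merges collapse into one item
//
// which is what maintains the invariant checked in Validate(): no two
// adjacent suballocations are ever both free.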
    8706 
    8707 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
    8708 {
    8709  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    8710  VMA_ASSERT(item->size > 0);
    8711 
    8712  // You may want to enable this validation at the beginning or at the end of
    8713  // this function, depending on what you want to check.
    8714  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    8715 
    8716  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    8717  {
    8718  if(m_FreeSuballocationsBySize.empty())
    8719  {
    8720  m_FreeSuballocationsBySize.push_back(item);
    8721  }
    8722  else
    8723  {
    8724  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
    8725  }
    8726  }
    8727 
    8728  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    8729 }
    8730 
    8731 
    8732 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
    8733 {
    8734  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    8735  VMA_ASSERT(item->size > 0);
    8736 
    8737  // You may want to enable this validation at the beginning or at the end of
    8738  // this function, depending on what you want to check.
    8739  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    8740 
    8741  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    8742  {
    8743  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    8744  m_FreeSuballocationsBySize.data(),
    8745  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
    8746  item,
    8747  VmaSuballocationItemSizeLess());
    8748  for(size_t index = it - m_FreeSuballocationsBySize.data();
    8749  index < m_FreeSuballocationsBySize.size();
    8750  ++index)
    8751  {
    8752  if(m_FreeSuballocationsBySize[index] == item)
    8753  {
    8754  VmaVectorRemove(m_FreeSuballocationsBySize, index);
    8755  return;
    8756  }
    8757  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
    8758  }
    8759  VMA_ASSERT(0 && "Not found.");
    8760  }
    8761 
    8762  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    8763 }
    8764 
    8765 bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible(
    8766  VkDeviceSize bufferImageGranularity,
    8767  VmaSuballocationType& inOutPrevSuballocType) const
    8768 {
    8769  if(bufferImageGranularity == 1 || IsEmpty())
    8770  {
    8771  return false;
    8772  }
    8773 
    8774  VkDeviceSize minAlignment = VK_WHOLE_SIZE;
    8775  bool typeConflictFound = false;
    8776  for(VmaSuballocationList::const_iterator it = m_Suballocations.cbegin();
    8777  it != m_Suballocations.cend();
    8778  ++it)
    8779  {
    8780  const VmaSuballocationType suballocType = it->type;
    8781  if(suballocType != VMA_SUBALLOCATION_TYPE_FREE)
    8782  {
    8783  minAlignment = VMA_MIN(minAlignment, it->hAllocation->GetAlignment());
    8784  if(VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType))
    8785  {
    8786  typeConflictFound = true;
    8787  }
    8788  inOutPrevSuballocType = suballocType;
    8789  }
    8790  }
    8791 
    8792  return typeConflictFound || minAlignment >= bufferImageGranularity;
    8793 }
    8794 
    8795 ////////////////////////////////////////////////////////////////////////////////
    8796 // class VmaBlockMetadata_Linear
    8797 
    8797 
    8798 VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
    8799  VmaBlockMetadata(hAllocator),
    8800  m_SumFreeSize(0),
    8801  m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    8802  m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    8803  m_1stVectorIndex(0),
    8804  m_2ndVectorMode(SECOND_VECTOR_EMPTY),
    8805  m_1stNullItemsBeginCount(0),
    8806  m_1stNullItemsMiddleCount(0),
    8807  m_2ndNullItemsCount(0)
    8808 {
    8809 }
    8810 
    8811 VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
    8812 {
    8813 }
    8814 
    8815 void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
    8816 {
    8817  VmaBlockMetadata::Init(size);
    8818  m_SumFreeSize = size;
    8819 }
    8820 
    8821 bool VmaBlockMetadata_Linear::Validate() const
    8822 {
    8823  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8824  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8825 
    8826  VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
    8827  VMA_VALIDATE(!suballocations1st.empty() ||
    8828  suballocations2nd.empty() ||
    8829  m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
    8830 
    8831  if(!suballocations1st.empty())
    8832  {
    8833  // Null items at the beginning should be accounted for in m_1stNullItemsBeginCount.
    8834  VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
    8835  // A null item at the end should have been removed by pop_back().
    8836  VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
    8837  }
    8838  if(!suballocations2nd.empty())
    8839  {
    8840  // A null item at the end should have been removed by pop_back().
    8841  VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
    8842  }
    8843 
    8844  VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
    8845  VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
    8846 
    8847  VkDeviceSize sumUsedSize = 0;
    8848  const size_t suballoc1stCount = suballocations1st.size();
    8849  VkDeviceSize offset = VMA_DEBUG_MARGIN;
    8850 
    8851  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8852  {
    8853  const size_t suballoc2ndCount = suballocations2nd.size();
    8854  size_t nullItem2ndCount = 0;
    8855  for(size_t i = 0; i < suballoc2ndCount; ++i)
    8856  {
    8857  const VmaSuballocation& suballoc = suballocations2nd[i];
    8858  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    8859 
    8860  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
    8861  VMA_VALIDATE(suballoc.offset >= offset);
    8862 
    8863  if(!currFree)
    8864  {
    8865  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
    8866  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
    8867  sumUsedSize += suballoc.size;
    8868  }
    8869  else
    8870  {
    8871  ++nullItem2ndCount;
    8872  }
    8873 
    8874  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    8875  }
    8876 
    8877  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    8878  }
    8879 
    8880  for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
    8881  {
    8882  const VmaSuballocation& suballoc = suballocations1st[i];
    8883  VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
    8884  suballoc.hAllocation == VK_NULL_HANDLE);
    8885  }
    8886 
    8887  size_t nullItem1stCount = m_1stNullItemsBeginCount;
    8888 
    8889  for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
    8890  {
    8891  const VmaSuballocation& suballoc = suballocations1st[i];
    8892  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    8893 
    8894  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
    8895  VMA_VALIDATE(suballoc.offset >= offset);
    8896  VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
    8897 
    8898  if(!currFree)
    8899  {
    8900  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
    8901  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
    8902  sumUsedSize += suballoc.size;
    8903  }
    8904  else
    8905  {
    8906  ++nullItem1stCount;
    8907  }
    8908 
    8909  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    8910  }
    8911  VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
    8912 
    8913  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8914  {
    8915  const size_t suballoc2ndCount = suballocations2nd.size();
    8916  size_t nullItem2ndCount = 0;
    8917  for(size_t i = suballoc2ndCount; i--; )
    8918  {
    8919  const VmaSuballocation& suballoc = suballocations2nd[i];
    8920  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    8921 
    8922  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
    8923  VMA_VALIDATE(suballoc.offset >= offset);
    8924 
    8925  if(!currFree)
    8926  {
    8927  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
    8928  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
    8929  sumUsedSize += suballoc.size;
    8930  }
    8931  else
    8932  {
    8933  ++nullItem2ndCount;
    8934  }
    8935 
    8936  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    8937  }
    8938 
    8939  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    8940  }
    8941 
    8942  VMA_VALIDATE(offset <= GetSize());
    8943  VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
    8944 
    8945  return true;
    8946 }
    8947 
    8948 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
    8949 {
    8950  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
    8951  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
    8952 }
    8953 
    8954 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
    8955 {
    8956  const VkDeviceSize size = GetSize();
    8957 
    8958  /*
    8959  We don't consider gaps inside allocation vectors with freed allocations because
    8960  they are not suitable for reuse in a linear allocator. We consider only space that
    8961  is available for new allocations.
    8962  */
    8963  if(IsEmpty())
    8964  {
    8965  return size;
    8966  }
    8967 
    8968  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8969 
    8970  switch(m_2ndVectorMode)
    8971  {
    8972  case SECOND_VECTOR_EMPTY:
    8973  /*
    8974  Available space is after end of 1st, as well as before beginning of 1st (which
    8975  would make it a ring buffer).
    8976  */
    8977  {
    8978  const size_t suballocations1stCount = suballocations1st.size();
    8979  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
    8980  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
    8981  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
    8982  return VMA_MAX(
    8983  firstSuballoc.offset,
    8984  size - (lastSuballoc.offset + lastSuballoc.size));
    8985  }
    8986  break;
    8987 
    8988  case SECOND_VECTOR_RING_BUFFER:
    8989  /*
    8990  Available space is only between end of 2nd and beginning of 1st.
    8991  */
    8992  {
    8993  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8994  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
    8995  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
    8996  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
    8997  }
    8998  break;
    8999 
    9000  case SECOND_VECTOR_DOUBLE_STACK:
    9001  /*
    9002  Available space is only between end of 1st and top of 2nd.
    9003  */
    9004  {
    9005  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9006  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
    9007  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
    9008  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
    9009  }
    9010  break;
    9011 
    9012  default:
    9013  VMA_ASSERT(0);
    9014  return 0;
    9015  }
    9016 }
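// The three m_2ndVectorMode layouts handled above, sketched within one block:
//
//   SECOND_VECTOR_EMPTY:        | free | 1st 1st 1st | free |
//   SECOND_VECTOR_RING_BUFFER:  | 2nd 2nd | free | 1st 1st 1st |
//   SECOND_VECTOR_DOUBLE_STACK: | 1st 1st 1st | free | 2nd 2nd |
//
// 1st grows toward higher addresses; in ring-buffer mode 2nd has wrapped
// around to the low end of the block, and in double-stack mode 2nd grows
// downward from the end of the block toward 1st.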
    9017 
    9018 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    9019 {
    9020  const VkDeviceSize size = GetSize();
    9021  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9022  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9023  const size_t suballoc1stCount = suballocations1st.size();
    9024  const size_t suballoc2ndCount = suballocations2nd.size();
    9025 
    9026  outInfo.blockCount = 1;
    9027  outInfo.allocationCount = (uint32_t)GetAllocationCount();
    9028  outInfo.unusedRangeCount = 0;
    9029  outInfo.usedBytes = 0;
    9030  outInfo.allocationSizeMin = UINT64_MAX;
    9031  outInfo.allocationSizeMax = 0;
    9032  outInfo.unusedRangeSizeMin = UINT64_MAX;
    9033  outInfo.unusedRangeSizeMax = 0;
    9034 
    9035  VkDeviceSize lastOffset = 0;
    9036 
    9037  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    9038  {
    9039  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    9040  size_t nextAlloc2ndIndex = 0;
    9041  while(lastOffset < freeSpace2ndTo1stEnd)
    9042  {
    9043  // Find next non-null allocation or move nextAllocIndex to the end.
    9044  while(nextAlloc2ndIndex < suballoc2ndCount &&
    9045  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9046  {
    9047  ++nextAlloc2ndIndex;
    9048  }
    9049 
    9050  // Found non-null allocation.
    9051  if(nextAlloc2ndIndex < suballoc2ndCount)
    9052  {
    9053  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9054 
    9055  // 1. Process free space before this allocation.
    9056  if(lastOffset < suballoc.offset)
    9057  {
    9058  // There is free space from lastOffset to suballoc.offset.
    9059  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9060  ++outInfo.unusedRangeCount;
    9061  outInfo.unusedBytes += unusedRangeSize;
    9062  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9063  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9064  }
    9065 
    9066  // 2. Process this allocation.
    9067  // There is allocation with suballoc.offset, suballoc.size.
    9068  outInfo.usedBytes += suballoc.size;
    9069  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    9070  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
    9071 
    9072  // 3. Prepare for next iteration.
    9073  lastOffset = suballoc.offset + suballoc.size;
    9074  ++nextAlloc2ndIndex;
    9075  }
    9076  // We are at the end.
    9077  else
    9078  {
    9079  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    9080  if(lastOffset < freeSpace2ndTo1stEnd)
    9081  {
    9082  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    9083  ++outInfo.unusedRangeCount;
    9084  outInfo.unusedBytes += unusedRangeSize;
    9085  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9086  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9087  }
    9088 
    9089  // End of loop.
    9090  lastOffset = freeSpace2ndTo1stEnd;
    9091  }
    9092  }
    9093  }
    9094 
    9095  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    9096  const VkDeviceSize freeSpace1stTo2ndEnd =
    9097  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    9098  while(lastOffset < freeSpace1stTo2ndEnd)
    9099  {
    9100  // Find next non-null allocation or move nextAlloc1stIndex to the end.
    9101  while(nextAlloc1stIndex < suballoc1stCount &&
    9102  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    9103  {
    9104  ++nextAlloc1stIndex;
    9105  }
    9106 
    9107  // Found non-null allocation.
    9108  if(nextAlloc1stIndex < suballoc1stCount)
    9109  {
    9110  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    9111 
    9112  // 1. Process free space before this allocation.
    9113  if(lastOffset < suballoc.offset)
    9114  {
    9115  // There is free space from lastOffset to suballoc.offset.
    9116  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9117  ++outInfo.unusedRangeCount;
    9118  outInfo.unusedBytes += unusedRangeSize;
    9119  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9120  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9121  }
    9122 
    9123  // 2. Process this allocation.
    9124  // There is allocation with suballoc.offset, suballoc.size.
    9125  outInfo.usedBytes += suballoc.size;
    9126  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    9127  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
    9128 
    9129  // 3. Prepare for next iteration.
    9130  lastOffset = suballoc.offset + suballoc.size;
    9131  ++nextAlloc1stIndex;
    9132  }
    9133  // We are at the end.
    9134  else
    9135  {
    9136  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    9137  if(lastOffset < freeSpace1stTo2ndEnd)
    9138  {
    9139  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    9140  ++outInfo.unusedRangeCount;
    9141  outInfo.unusedBytes += unusedRangeSize;
    9142  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9143  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9144  }
    9145 
    9146  // End of loop.
    9147  lastOffset = freeSpace1stTo2ndEnd;
    9148  }
    9149  }
    9150 
    9151  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    9152  {
    9153  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    9154  while(lastOffset < size)
    9155  {
    9156  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9157  while(nextAlloc2ndIndex != SIZE_MAX &&
    9158  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9159  {
    9160  --nextAlloc2ndIndex;
    9161  }
    9162 
    9163  // Found non-null allocation.
    9164  if(nextAlloc2ndIndex != SIZE_MAX)
    9165  {
    9166  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9167 
    9168  // 1. Process free space before this allocation.
    9169  if(lastOffset < suballoc.offset)
    9170  {
    9171  // There is free space from lastOffset to suballoc.offset.
    9172  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9173  ++outInfo.unusedRangeCount;
    9174  outInfo.unusedBytes += unusedRangeSize;
    9175  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9176  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9177  }
    9178 
    9179  // 2. Process this allocation.
    9180  // There is allocation with suballoc.offset, suballoc.size.
    9181  outInfo.usedBytes += suballoc.size;
    9182  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    9183  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
    9184 
    9185  // 3. Prepare for next iteration.
    9186  lastOffset = suballoc.offset + suballoc.size;
    9187  --nextAlloc2ndIndex;
    9188  }
    9189  // We are at the end.
    9190  else
    9191  {
    9192  // There is free space from lastOffset to size.
    9193  if(lastOffset < size)
    9194  {
    9195  const VkDeviceSize unusedRangeSize = size - lastOffset;
    9196  ++outInfo.unusedRangeCount;
    9197  outInfo.unusedBytes += unusedRangeSize;
    9198  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9199  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9200  }
    9201 
    9202  // End of loop.
    9203  lastOffset = size;
    9204  }
    9205  }
    9206  }
    9207 
    9208  outInfo.unusedBytes = size - outInfo.usedBytes;
    9209 }
    9210 
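// Same three-region walk as CalcAllocationStatInfo above, but it only accumulates pool-level
// totals (allocation count, unused size, largest unused range) into inoutStats.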
    9211 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
    9212 {
    9213  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9214  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9215  const VkDeviceSize size = GetSize();
    9216  const size_t suballoc1stCount = suballocations1st.size();
    9217  const size_t suballoc2ndCount = suballocations2nd.size();
    9218 
    9219  inoutStats.size += size;
    9220 
    9221  VkDeviceSize lastOffset = 0;
    9222 
    9223  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    9224  {
    9225  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    9226  size_t nextAlloc2ndIndex = 0;
    9227  while(lastOffset < freeSpace2ndTo1stEnd)
    9228  {
    9229  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9230  while(nextAlloc2ndIndex < suballoc2ndCount &&
    9231  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9232  {
    9233  ++nextAlloc2ndIndex;
    9234  }
    9235 
    9236  // Found non-null allocation.
    9237  if(nextAlloc2ndIndex < suballoc2ndCount)
    9238  {
    9239  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9240 
    9241  // 1. Process free space before this allocation.
    9242  if(lastOffset < suballoc.offset)
    9243  {
    9244  // There is free space from lastOffset to suballoc.offset.
    9245  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9246  inoutStats.unusedSize += unusedRangeSize;
    9247  ++inoutStats.unusedRangeCount;
    9248  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9249  }
    9250 
    9251  // 2. Process this allocation.
    9252  // There is allocation with suballoc.offset, suballoc.size.
    9253  ++inoutStats.allocationCount;
    9254 
    9255  // 3. Prepare for next iteration.
    9256  lastOffset = suballoc.offset + suballoc.size;
    9257  ++nextAlloc2ndIndex;
    9258  }
    9259  // We are at the end.
    9260  else
    9261  {
    9262  if(lastOffset < freeSpace2ndTo1stEnd)
    9263  {
    9264  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    9265  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    9266  inoutStats.unusedSize += unusedRangeSize;
    9267  ++inoutStats.unusedRangeCount;
    9268  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9269  }
    9270 
    9271  // End of loop.
    9272  lastOffset = freeSpace2ndTo1stEnd;
    9273  }
    9274  }
    9275  }
    9276 
    9277  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    9278  const VkDeviceSize freeSpace1stTo2ndEnd =
    9279  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    9280  while(lastOffset < freeSpace1stTo2ndEnd)
    9281  {
    9282  // Find next non-null allocation or move nextAlloc1stIndex to the end.
    9283  while(nextAlloc1stIndex < suballoc1stCount &&
    9284  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    9285  {
    9286  ++nextAlloc1stIndex;
    9287  }
    9288 
    9289  // Found non-null allocation.
    9290  if(nextAlloc1stIndex < suballoc1stCount)
    9291  {
    9292  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    9293 
    9294  // 1. Process free space before this allocation.
    9295  if(lastOffset < suballoc.offset)
    9296  {
    9297  // There is free space from lastOffset to suballoc.offset.
    9298  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9299  inoutStats.unusedSize += unusedRangeSize;
    9300  ++inoutStats.unusedRangeCount;
    9301  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9302  }
    9303 
    9304  // 2. Process this allocation.
    9305  // There is allocation with suballoc.offset, suballoc.size.
    9306  ++inoutStats.allocationCount;
    9307 
    9308  // 3. Prepare for next iteration.
    9309  lastOffset = suballoc.offset + suballoc.size;
    9310  ++nextAlloc1stIndex;
    9311  }
    9312  // We are at the end.
    9313  else
    9314  {
    9315  if(lastOffset < freeSpace1stTo2ndEnd)
    9316  {
    9317  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    9318  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    9319  inoutStats.unusedSize += unusedRangeSize;
    9320  ++inoutStats.unusedRangeCount;
    9321  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9322  }
    9323 
    9324  // End of loop.
    9325  lastOffset = freeSpace1stTo2ndEnd;
    9326  }
    9327  }
    9328 
    9329  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    9330  {
    9331  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    9332  while(lastOffset < size)
    9333  {
    9334  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9335  while(nextAlloc2ndIndex != SIZE_MAX &&
    9336  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9337  {
    9338  --nextAlloc2ndIndex;
    9339  }
    9340 
    9341  // Found non-null allocation.
    9342  if(nextAlloc2ndIndex != SIZE_MAX)
    9343  {
    9344  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9345 
    9346  // 1. Process free space before this allocation.
    9347  if(lastOffset < suballoc.offset)
    9348  {
    9349  // There is free space from lastOffset to suballoc.offset.
    9350  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9351  inoutStats.unusedSize += unusedRangeSize;
    9352  ++inoutStats.unusedRangeCount;
    9353  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9354  }
    9355 
    9356  // 2. Process this allocation.
    9357  // There is allocation with suballoc.offset, suballoc.size.
    9358  ++inoutStats.allocationCount;
    9359 
    9360  // 3. Prepare for next iteration.
    9361  lastOffset = suballoc.offset + suballoc.size;
    9362  --nextAlloc2ndIndex;
    9363  }
    9364  // We are at the end.
    9365  else
    9366  {
    9367  if(lastOffset < size)
    9368  {
    9369  // There is free space from lastOffset to size.
    9370  const VkDeviceSize unusedRangeSize = size - lastOffset;
    9371  inoutStats.unusedSize += unusedRangeSize;
    9372  ++inoutStats.unusedRangeCount;
    9373  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9374  }
    9375 
    9376  // End of loop.
    9377  lastOffset = size;
    9378  }
    9379  }
    9380  }
    9381 }
    9382 
    9383 #if VMA_STATS_STRING_ENABLED
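// Two passes over the same three regions: the first pass only counts allocations, used bytes,
// and unused ranges so that PrintDetailedMap_Begin() can be called with correct totals; the
// second pass emits each allocation and unused range into the JSON writer.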
    9384 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
    9385 {
    9386  const VkDeviceSize size = GetSize();
    9387  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9388  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9389  const size_t suballoc1stCount = suballocations1st.size();
    9390  const size_t suballoc2ndCount = suballocations2nd.size();
    9391 
    9392  // FIRST PASS
    9393 
    9394  size_t unusedRangeCount = 0;
    9395  VkDeviceSize usedBytes = 0;
    9396 
    9397  VkDeviceSize lastOffset = 0;
    9398 
    9399  size_t alloc2ndCount = 0;
    9400  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    9401  {
    9402  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    9403  size_t nextAlloc2ndIndex = 0;
    9404  while(lastOffset < freeSpace2ndTo1stEnd)
    9405  {
    9406  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9407  while(nextAlloc2ndIndex < suballoc2ndCount &&
    9408  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9409  {
    9410  ++nextAlloc2ndIndex;
    9411  }
    9412 
    9413  // Found non-null allocation.
    9414  if(nextAlloc2ndIndex < suballoc2ndCount)
    9415  {
    9416  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9417 
    9418  // 1. Process free space before this allocation.
    9419  if(lastOffset < suballoc.offset)
    9420  {
    9421  // There is free space from lastOffset to suballoc.offset.
    9422  ++unusedRangeCount;
    9423  }
    9424 
    9425  // 2. Process this allocation.
    9426  // There is allocation with suballoc.offset, suballoc.size.
    9427  ++alloc2ndCount;
    9428  usedBytes += suballoc.size;
    9429 
    9430  // 3. Prepare for next iteration.
    9431  lastOffset = suballoc.offset + suballoc.size;
    9432  ++nextAlloc2ndIndex;
    9433  }
    9434  // We are at the end.
    9435  else
    9436  {
    9437  if(lastOffset < freeSpace2ndTo1stEnd)
    9438  {
    9439  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    9440  ++unusedRangeCount;
    9441  }
    9442 
    9443  // End of loop.
    9444  lastOffset = freeSpace2ndTo1stEnd;
    9445  }
    9446  }
    9447  }
    9448 
    9449  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    9450  size_t alloc1stCount = 0;
    9451  const VkDeviceSize freeSpace1stTo2ndEnd =
    9452  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    9453  while(lastOffset < freeSpace1stTo2ndEnd)
    9454  {
    9455  // Find next non-null allocation or move nextAlloc1stIndex to the end.
    9456  while(nextAlloc1stIndex < suballoc1stCount &&
    9457  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    9458  {
    9459  ++nextAlloc1stIndex;
    9460  }
    9461 
    9462  // Found non-null allocation.
    9463  if(nextAlloc1stIndex < suballoc1stCount)
    9464  {
    9465  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    9466 
    9467  // 1. Process free space before this allocation.
    9468  if(lastOffset < suballoc.offset)
    9469  {
    9470  // There is free space from lastOffset to suballoc.offset.
    9471  ++unusedRangeCount;
    9472  }
    9473 
    9474  // 2. Process this allocation.
    9475  // There is allocation with suballoc.offset, suballoc.size.
    9476  ++alloc1stCount;
    9477  usedBytes += suballoc.size;
    9478 
    9479  // 3. Prepare for next iteration.
    9480  lastOffset = suballoc.offset + suballoc.size;
    9481  ++nextAlloc1stIndex;
    9482  }
    9483  // We are at the end.
    9484  else
    9485  {
    9486  if(lastOffset < freeSpace1stTo2ndEnd)
    9487  {
    9488  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    9489  ++unusedRangeCount;
    9490  }
    9491 
    9492  // End of loop.
    9493  lastOffset = freeSpace1stTo2ndEnd;
    9494  }
    9495  }
    9496 
    9497  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    9498  {
    9499  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    9500  while(lastOffset < size)
    9501  {
    9502  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9503  while(nextAlloc2ndIndex != SIZE_MAX &&
    9504  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9505  {
    9506  --nextAlloc2ndIndex;
    9507  }
    9508 
    9509  // Found non-null allocation.
    9510  if(nextAlloc2ndIndex != SIZE_MAX)
    9511  {
    9512  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9513 
    9514  // 1. Process free space before this allocation.
    9515  if(lastOffset < suballoc.offset)
    9516  {
    9517  // There is free space from lastOffset to suballoc.offset.
    9518  ++unusedRangeCount;
    9519  }
    9520 
    9521  // 2. Process this allocation.
    9522  // There is allocation with suballoc.offset, suballoc.size.
    9523  ++alloc2ndCount;
    9524  usedBytes += suballoc.size;
    9525 
    9526  // 3. Prepare for next iteration.
    9527  lastOffset = suballoc.offset + suballoc.size;
    9528  --nextAlloc2ndIndex;
    9529  }
    9530  // We are at the end.
    9531  else
    9532  {
    9533  if(lastOffset < size)
    9534  {
    9535  // There is free space from lastOffset to size.
    9536  ++unusedRangeCount;
    9537  }
    9538 
    9539  // End of loop.
    9540  lastOffset = size;
    9541  }
    9542  }
    9543  }
    9544 
    9545  const VkDeviceSize unusedBytes = size - usedBytes;
    9546  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
    9547 
    9548  // SECOND PASS
    9549  lastOffset = 0;
    9550 
    9551  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    9552  {
    9553  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    9554  size_t nextAlloc2ndIndex = 0;
    9555  while(lastOffset < freeSpace2ndTo1stEnd)
    9556  {
    9557  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9558  while(nextAlloc2ndIndex < suballoc2ndCount &&
    9559  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9560  {
    9561  ++nextAlloc2ndIndex;
    9562  }
    9563 
    9564  // Found non-null allocation.
    9565  if(nextAlloc2ndIndex < suballoc2ndCount)
    9566  {
    9567  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9568 
    9569  // 1. Process free space before this allocation.
    9570  if(lastOffset < suballoc.offset)
    9571  {
    9572  // There is free space from lastOffset to suballoc.offset.
    9573  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9574  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    9575  }
    9576 
    9577  // 2. Process this allocation.
    9578  // There is allocation with suballoc.offset, suballoc.size.
    9579  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    9580 
    9581  // 3. Prepare for next iteration.
    9582  lastOffset = suballoc.offset + suballoc.size;
    9583  ++nextAlloc2ndIndex;
    9584  }
    9585  // We are at the end.
    9586  else
    9587  {
    9588  if(lastOffset < freeSpace2ndTo1stEnd)
    9589  {
    9590  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    9591  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    9592  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    9593  }
    9594 
    9595  // End of loop.
    9596  lastOffset = freeSpace2ndTo1stEnd;
    9597  }
    9598  }
    9599  }
    9600 
    9601  nextAlloc1stIndex = m_1stNullItemsBeginCount;
    9602  while(lastOffset < freeSpace1stTo2ndEnd)
    9603  {
    9604  // Find next non-null allocation or move nextAlloc1stIndex to the end.
    9605  while(nextAlloc1stIndex < suballoc1stCount &&
    9606  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    9607  {
    9608  ++nextAlloc1stIndex;
    9609  }
    9610 
    9611  // Found non-null allocation.
    9612  if(nextAlloc1stIndex < suballoc1stCount)
    9613  {
    9614  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    9615 
    9616  // 1. Process free space before this allocation.
    9617  if(lastOffset < suballoc.offset)
    9618  {
    9619  // There is free space from lastOffset to suballoc.offset.
    9620  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9621  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    9622  }
    9623 
    9624  // 2. Process this allocation.
    9625  // There is allocation with suballoc.offset, suballoc.size.
    9626  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    9627 
    9628  // 3. Prepare for next iteration.
    9629  lastOffset = suballoc.offset + suballoc.size;
    9630  ++nextAlloc1stIndex;
    9631  }
    9632  // We are at the end.
    9633  else
    9634  {
    9635  if(lastOffset < freeSpace1stTo2ndEnd)
    9636  {
    9637  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    9638  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    9639  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    9640  }
    9641 
    9642  // End of loop.
    9643  lastOffset = freeSpace1stTo2ndEnd;
    9644  }
    9645  }
    9646 
    9647  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    9648  {
    9649  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    9650  while(lastOffset < size)
    9651  {
    9652  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9653  while(nextAlloc2ndIndex != SIZE_MAX &&
    9654  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9655  {
    9656  --nextAlloc2ndIndex;
    9657  }
    9658 
    9659  // Found non-null allocation.
    9660  if(nextAlloc2ndIndex != SIZE_MAX)
    9661  {
    9662  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9663 
    9664  // 1. Process free space before this allocation.
    9665  if(lastOffset < suballoc.offset)
    9666  {
    9667  // There is free space from lastOffset to suballoc.offset.
    9668  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9669  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    9670  }
    9671 
    9672  // 2. Process this allocation.
    9673  // There is allocation with suballoc.offset, suballoc.size.
    9674  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    9675 
    9676  // 3. Prepare for next iteration.
    9677  lastOffset = suballoc.offset + suballoc.size;
    9678  --nextAlloc2ndIndex;
    9679  }
    9680  // We are at the end.
    9681  else
    9682  {
    9683  if(lastOffset < size)
    9684  {
    9685  // There is free space from lastOffset to size.
    9686  const VkDeviceSize unusedRangeSize = size - lastOffset;
    9687  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    9688  }
    9689 
    9690  // End of loop.
    9691  lastOffset = size;
    9692  }
    9693  }
    9694  }
    9695 
    9696  PrintDetailedMap_End(json);
    9697 }
    9698 #endif // #if VMA_STATS_STRING_ENABLED
    9699 
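// Entry point for the linear algorithm: dispatches to the upper-address path (top of the
// double stack) or the lower-address path (end of 1st vector, possibly wrapping into 2nd).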
    9700 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
    9701  uint32_t currentFrameIndex,
    9702  uint32_t frameInUseCount,
    9703  VkDeviceSize bufferImageGranularity,
    9704  VkDeviceSize allocSize,
    9705  VkDeviceSize allocAlignment,
    9706  bool upperAddress,
    9707  VmaSuballocationType allocType,
    9708  bool canMakeOtherLost,
    9709  uint32_t strategy,
    9710  VmaAllocationRequest* pAllocationRequest)
    9711 {
    9712  VMA_ASSERT(allocSize > 0);
    9713  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    9714  VMA_ASSERT(pAllocationRequest != VMA_NULL);
    9715  VMA_HEAVY_ASSERT(Validate());
    9716  return upperAddress ?
    9717  CreateAllocationRequest_UpperAddress(
    9718  currentFrameIndex, frameInUseCount, bufferImageGranularity,
    9719  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest) :
    9720  CreateAllocationRequest_LowerAddress(
    9721  currentFrameIndex, frameInUseCount, bufferImageGranularity,
    9722  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest);
    9723 }
    9724 
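// Upper-address allocation grows downward from the end of the block (or from just below the
// current top of the 2nd vector), so the debug margin and the alignment are applied with
// VmaAlignDown, and the request fails if it would overlap the end of the 1st vector.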
    9725 bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress(
    9726  uint32_t currentFrameIndex,
    9727  uint32_t frameInUseCount,
    9728  VkDeviceSize bufferImageGranularity,
    9729  VkDeviceSize allocSize,
    9730  VkDeviceSize allocAlignment,
    9731  VmaSuballocationType allocType,
    9732  bool canMakeOtherLost,
    9733  uint32_t strategy,
    9734  VmaAllocationRequest* pAllocationRequest)
    9735 {
    9736  const VkDeviceSize size = GetSize();
    9737  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9738  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9739 
    9740  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    9741  {
    9742  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
    9743  return false;
    9744  }
    9745 
    9746  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
    9747  if(allocSize > size)
    9748  {
    9749  return false;
    9750  }
    9751  VkDeviceSize resultBaseOffset = size - allocSize;
    9752  if(!suballocations2nd.empty())
    9753  {
    9754  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
    9755  resultBaseOffset = lastSuballoc.offset - allocSize;
    9756  if(allocSize > lastSuballoc.offset)
    9757  {
    9758  return false;
    9759  }
    9760  }
    9761 
    9762  // Start from offset equal to end of free space.
    9763  VkDeviceSize resultOffset = resultBaseOffset;
    9764 
    9765  // Apply VMA_DEBUG_MARGIN at the end.
    9766  if(VMA_DEBUG_MARGIN > 0)
    9767  {
    9768  if(resultOffset < VMA_DEBUG_MARGIN)
    9769  {
    9770  return false;
    9771  }
    9772  resultOffset -= VMA_DEBUG_MARGIN;
    9773  }
    9774 
    9775  // Apply alignment.
    9776  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
    9777 
    9778  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
    9779  // Make bigger alignment if necessary.
    9780  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
    9781  {
    9782  bool bufferImageGranularityConflict = false;
    9783  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
    9784  {
    9785  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
    9786  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    9787  {
    9788  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
    9789  {
    9790  bufferImageGranularityConflict = true;
    9791  break;
    9792  }
    9793  }
    9794  else
    9795  // Already on previous page.
    9796  break;
    9797  }
    9798  if(bufferImageGranularityConflict)
    9799  {
    9800  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
    9801  }
    9802  }
    9803 
    9804  // There is enough free space.
    9805  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
    9806  suballocations1st.back().offset + suballocations1st.back().size :
    9807  0;
    9808  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
    9809  {
    9810  // Check previous suballocations for BufferImageGranularity conflicts.
    9811  // If conflict exists, allocation cannot be made here.
    9812  if(bufferImageGranularity > 1)
    9813  {
    9814  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
    9815  {
    9816  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
    9817  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
    9818  {
    9819  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
    9820  {
    9821  return false;
    9822  }
    9823  }
    9824  else
    9825  {
    9826  // Already on next page.
    9827  break;
    9828  }
    9829  }
    9830  }
    9831 
    9832  // All tests passed: Success.
    9833  pAllocationRequest->offset = resultOffset;
    9834  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
    9835  pAllocationRequest->sumItemSize = 0;
    9836  // pAllocationRequest->item unused.
    9837  pAllocationRequest->itemsToMakeLostCount = 0;
    9838  pAllocationRequest->type = VmaAllocationRequestType::UpperAddress;
    9839  return true;
    9840  }
    9841 
    9842  return false;
    9843 }
    9844 
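// Lower-address allocation first tries to append at the end of the 1st vector, bounded by the
// bottom of the 2nd-vector stack or the end of the block. If that fails and the 2nd vector is
// empty or already a ring buffer, it wraps around and tries to allocate at the end of the 2nd
// vector, just below the first allocation of the 1st vector, optionally making colliding
// allocations lost when canMakeOtherLost is set.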
    9845 bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress(
    9846  uint32_t currentFrameIndex,
    9847  uint32_t frameInUseCount,
    9848  VkDeviceSize bufferImageGranularity,
    9849  VkDeviceSize allocSize,
    9850  VkDeviceSize allocAlignment,
    9851  VmaSuballocationType allocType,
    9852  bool canMakeOtherLost,
    9853  uint32_t strategy,
    9854  VmaAllocationRequest* pAllocationRequest)
    9855 {
    9856  const VkDeviceSize size = GetSize();
    9857  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9858  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9859 
    9860  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    9861  {
    9862  // Try to allocate at the end of 1st vector.
    9863 
    9864  VkDeviceSize resultBaseOffset = 0;
    9865  if(!suballocations1st.empty())
    9866  {
    9867  const VmaSuballocation& lastSuballoc = suballocations1st.back();
    9868  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
    9869  }
    9870 
    9871  // Start from offset equal to beginning of free space.
    9872  VkDeviceSize resultOffset = resultBaseOffset;
    9873 
    9874  // Apply VMA_DEBUG_MARGIN at the beginning.
    9875  if(VMA_DEBUG_MARGIN > 0)
    9876  {
    9877  resultOffset += VMA_DEBUG_MARGIN;
    9878  }
    9879 
    9880  // Apply alignment.
    9881  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
    9882 
    9883  // Check previous suballocations for BufferImageGranularity conflicts.
    9884  // Make bigger alignment if necessary.
    9885  if(bufferImageGranularity > 1 && !suballocations1st.empty())
    9886  {
    9887  bool bufferImageGranularityConflict = false;
    9888  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
    9889  {
    9890  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
    9891  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
    9892  {
    9893  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    9894  {
    9895  bufferImageGranularityConflict = true;
    9896  break;
    9897  }
    9898  }
    9899  else
    9900  // Already on previous page.
    9901  break;
    9902  }
    9903  if(bufferImageGranularityConflict)
    9904  {
    9905  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
    9906  }
    9907  }
    9908 
    9909  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
    9910  suballocations2nd.back().offset : size;
    9911 
    9912  // There is enough free space at the end after alignment.
    9913  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
    9914  {
    9915  // Check next suballocations for BufferImageGranularity conflicts.
    9916  // If conflict exists, allocation cannot be made here.
    9917  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    9918  {
    9919  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
    9920  {
    9921  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
    9922  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    9923  {
    9924  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    9925  {
    9926  return false;
    9927  }
    9928  }
    9929  else
    9930  {
    9931  // Already on previous page.
    9932  break;
    9933  }
    9934  }
    9935  }
    9936 
    9937  // All tests passed: Success.
    9938  pAllocationRequest->offset = resultOffset;
    9939  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
    9940  pAllocationRequest->sumItemSize = 0;
    9941  // pAllocationRequest->item, customData unused.
    9942  pAllocationRequest->type = VmaAllocationRequestType::EndOf1st;
    9943  pAllocationRequest->itemsToMakeLostCount = 0;
    9944  return true;
    9945  }
    9946  }
    9947 
    9948  // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
    9949  // beginning of 1st vector as the end of free space.
    9950  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    9951  {
    9952  VMA_ASSERT(!suballocations1st.empty());
    9953 
    9954  VkDeviceSize resultBaseOffset = 0;
    9955  if(!suballocations2nd.empty())
    9956  {
    9957  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
    9958  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
    9959  }
    9960 
    9961  // Start from offset equal to beginning of free space.
    9962  VkDeviceSize resultOffset = resultBaseOffset;
    9963 
    9964  // Apply VMA_DEBUG_MARGIN at the beginning.
    9965  if(VMA_DEBUG_MARGIN > 0)
    9966  {
    9967  resultOffset += VMA_DEBUG_MARGIN;
    9968  }
    9969 
    9970  // Apply alignment.
    9971  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
    9972 
    9973  // Check previous suballocations for BufferImageGranularity conflicts.
    9974  // Make bigger alignment if necessary.
    9975  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
    9976  {
    9977  bool bufferImageGranularityConflict = false;
    9978  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
    9979  {
    9980  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
    9981  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
    9982  {
    9983  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    9984  {
    9985  bufferImageGranularityConflict = true;
    9986  break;
    9987  }
    9988  }
    9989  else
    9990  // Already on previous page.
    9991  break;
    9992  }
    9993  if(bufferImageGranularityConflict)
    9994  {
    9995  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
    9996  }
    9997  }
    9998 
    9999  pAllocationRequest->itemsToMakeLostCount = 0;
    10000  pAllocationRequest->sumItemSize = 0;
    10001  size_t index1st = m_1stNullItemsBeginCount;
    10002 
    10003  if(canMakeOtherLost)
    10004  {
    10005  while(index1st < suballocations1st.size() &&
    10006  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
    10007  {
    10008  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
    10009  const VmaSuballocation& suballoc = suballocations1st[index1st];
    10010  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
    10011  {
    10012  // No problem.
    10013  }
    10014  else
    10015  {
    10016  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
    10017  if(suballoc.hAllocation->CanBecomeLost() &&
    10018  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    10019  {
    10020  ++pAllocationRequest->itemsToMakeLostCount;
    10021  pAllocationRequest->sumItemSize += suballoc.size;
    10022  }
    10023  else
    10024  {
    10025  return false;
    10026  }
    10027  }
    10028  ++index1st;
    10029  }
    10030 
    10031  // Check next suballocations for BufferImageGranularity conflicts.
    10032  // If conflict exists, we must mark more allocations lost or fail.
    10033  if(bufferImageGranularity > 1)
    10034  {
    10035  while(index1st < suballocations1st.size())
    10036  {
    10037  const VmaSuballocation& suballoc = suballocations1st[index1st];
    10038  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
    10039  {
    10040  if(suballoc.hAllocation != VK_NULL_HANDLE)
    10041  {
    10042  // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
    10043  if(suballoc.hAllocation->CanBecomeLost() &&
    10044  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    10045  {
    10046  ++pAllocationRequest->itemsToMakeLostCount;
    10047  pAllocationRequest->sumItemSize += suballoc.size;
    10048  }
    10049  else
    10050  {
    10051  return false;
    10052  }
    10053  }
    10054  }
    10055  else
    10056  {
    10057  // Already on next page.
    10058  break;
    10059  }
    10060  ++index1st;
    10061  }
    10062  }
    10063 
    10064  // Special case: There is not enough room at the end for this allocation, even after making all from the 1st lost.
    10065  if(index1st == suballocations1st.size() &&
    10066  resultOffset + allocSize + VMA_DEBUG_MARGIN > size)
    10067  {
    10068  // TODO: Known bug: this case is not implemented yet, so the allocation fails.
    10069  VMA_DEBUG_LOG("Unsupported special case in custom pool with linear allocation algorithm used as ring buffer with allocations that can be lost.");
    10070  }
    10071  }
    10072 
    10073  // There is enough free space at the end after alignment.
    10074  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= size) ||
    10075  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
    10076  {
    10077  // Check next suballocations for BufferImageGranularity conflicts.
    10078  // If conflict exists, allocation cannot be made here.
    10079  if(bufferImageGranularity > 1)
    10080  {
    10081  for(size_t nextSuballocIndex = index1st;
    10082  nextSuballocIndex < suballocations1st.size();
    10083  nextSuballocIndex++)
    10084  {
    10085  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
    10086  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    10087  {
    10088  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    10089  {
    10090  return false;
    10091  }
    10092  }
    10093  else
    10094  {
    10095  // Already on next page.
    10096  break;
    10097  }
    10098  }
    10099  }
    10100 
    10101  // All tests passed: Success.
    10102  pAllocationRequest->offset = resultOffset;
    10103  pAllocationRequest->sumFreeSize =
    10104  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
    10105  - resultBaseOffset
    10106  - pAllocationRequest->sumItemSize;
    10107  pAllocationRequest->type = VmaAllocationRequestType::EndOf2nd;
    10108  // pAllocationRequest->item, customData unused.
    10109  return true;
    10110  }
    10111  }
    10112 
    10113  return false;
    10114 }
    10115 
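// Marks the allocations counted in pAllocationRequest->itemsToMakeLostCount as lost, starting
// at the first non-null item of the 1st vector and wrapping into the 2nd vector in ring-buffer
// mode, then compacts the metadata via CleanupAfterFree().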
    10116 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
    10117  uint32_t currentFrameIndex,
    10118  uint32_t frameInUseCount,
    10119  VmaAllocationRequest* pAllocationRequest)
    10120 {
    10121  if(pAllocationRequest->itemsToMakeLostCount == 0)
    10122  {
    10123  return true;
    10124  }
    10125 
    10126  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
    10127 
    10128  // We always start from 1st.
    10129  SuballocationVectorType* suballocations = &AccessSuballocations1st();
    10130  size_t index = m_1stNullItemsBeginCount;
    10131  size_t madeLostCount = 0;
    10132  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
    10133  {
    10134  if(index == suballocations->size())
    10135  {
    10136  index = 0;
    10137  // If we get to the end of 1st, we wrap around to the beginning of 2nd.
    10138  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    10139  {
    10140  suballocations = &AccessSuballocations2nd();
    10141  }
    10142  // else: m_2ndVectorMode == SECOND_VECTOR_EMPTY:
    10143  // suballocations continues pointing at AccessSuballocations1st().
    10144  VMA_ASSERT(!suballocations->empty());
    10145  }
    10146  VmaSuballocation& suballoc = (*suballocations)[index];
    10147  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    10148  {
    10149  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
    10150  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
    10151  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    10152  {
    10153  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    10154  suballoc.hAllocation = VK_NULL_HANDLE;
    10155  m_SumFreeSize += suballoc.size;
    10156  if(suballocations == &AccessSuballocations1st())
    10157  {
    10158  ++m_1stNullItemsMiddleCount;
    10159  }
    10160  else
    10161  {
    10162  ++m_2ndNullItemsCount;
    10163  }
    10164  ++madeLostCount;
    10165  }
    10166  else
    10167  {
    10168  return false;
    10169  }
    10170  }
    10171  ++index;
    10172  }
    10173 
    10174  CleanupAfterFree();
    10175  //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().
    10176 
    10177  return true;
    10178 }
    10179 
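// Unconditionally tries to make every allocation in both vectors lost (subject to the
// frame-in-use constraint) and returns how many were actually freed this way.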
    10180 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    10181 {
    10182  uint32_t lostAllocationCount = 0;
    10183 
    10184  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    10185  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    10186  {
    10187  VmaSuballocation& suballoc = suballocations1st[i];
    10188  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    10189  suballoc.hAllocation->CanBecomeLost() &&
    10190  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    10191  {
    10192  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    10193  suballoc.hAllocation = VK_NULL_HANDLE;
    10194  ++m_1stNullItemsMiddleCount;
    10195  m_SumFreeSize += suballoc.size;
    10196  ++lostAllocationCount;
    10197  }
    10198  }
    10199 
    10200  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    10201  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    10202  {
    10203  VmaSuballocation& suballoc = suballocations2nd[i];
    10204  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    10205  suballoc.hAllocation->CanBecomeLost() &&
    10206  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    10207  {
    10208  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    10209  suballoc.hAllocation = VK_NULL_HANDLE;
    10210  ++m_2ndNullItemsCount;
    10211  m_SumFreeSize += suballoc.size;
    10212  ++lostAllocationCount;
    10213  }
    10214  }
    10215 
    10216  if(lostAllocationCount)
    10217  {
    10218  CleanupAfterFree();
    10219  }
    10220 
    10221  return lostAllocationCount;
    10222 }
    10223 
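// Verifies the magic values that were written into the VMA_DEBUG_MARGIN regions directly
// before and after every live allocation in both vectors.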
    10224 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
    10225 {
    10226  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    10227  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    10228  {
    10229  const VmaSuballocation& suballoc = suballocations1st[i];
    10230  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    10231  {
    10232  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    10233  {
    10234  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    10235  return VK_ERROR_VALIDATION_FAILED_EXT;
    10236  }
    10237  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    10238  {
    10239  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    10240  return VK_ERROR_VALIDATION_FAILED_EXT;
    10241  }
    10242  }
    10243  }
    10244 
    10245  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    10246  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    10247  {
    10248  const VmaSuballocation& suballoc = suballocations2nd[i];
    10249  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    10250  {
    10251  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    10252  {
    10253  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    10254  return VK_ERROR_VALIDATION_FAILED_EXT;
    10255  }
    10256  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    10257  {
    10258  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    10259  return VK_ERROR_VALIDATION_FAILED_EXT;
    10260  }
    10261  }
    10262  }
    10263 
    10264  return VK_SUCCESS;
    10265 }
    10266 
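// Commits a previously created allocation request: appends the new suballocation to the 1st
// or 2nd vector depending on request.type and updates m_2ndVectorMode and m_SumFreeSize.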
    10267 void VmaBlockMetadata_Linear::Alloc(
    10268  const VmaAllocationRequest& request,
    10269  VmaSuballocationType type,
    10270  VkDeviceSize allocSize,
    10271  VmaAllocation hAllocation)
    10272 {
    10273  const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
    10274 
    10275  switch(request.type)
    10276  {
    10277  case VmaAllocationRequestType::UpperAddress:
    10278  {
    10279  VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
    10280  "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
    10281  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    10282  suballocations2nd.push_back(newSuballoc);
    10283  m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
    10284  }
    10285  break;
    10286  case VmaAllocationRequestType::EndOf1st:
    10287  {
    10288  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    10289 
    10290  VMA_ASSERT(suballocations1st.empty() ||
    10291  request.offset >= suballocations1st.back().offset + suballocations1st.back().size);
    10292  // Check if it fits before the end of the block.
    10293  VMA_ASSERT(request.offset + allocSize <= GetSize());
    10294 
    10295  suballocations1st.push_back(newSuballoc);
    10296  }
    10297  break;
    10298  case VmaAllocationRequestType::EndOf2nd:
    10299  {
    10300  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    10301  // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
    10302  VMA_ASSERT(!suballocations1st.empty() &&
    10303  request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset);
    10304  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    10305 
    10306  switch(m_2ndVectorMode)
    10307  {
    10308  case SECOND_VECTOR_EMPTY:
    10309  // First allocation from second part ring buffer.
    10310  VMA_ASSERT(suballocations2nd.empty());
    10311  m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
    10312  break;
    10313  case SECOND_VECTOR_RING_BUFFER:
    10314  // 2-part ring buffer is already started.
    10315  VMA_ASSERT(!suballocations2nd.empty());
    10316  break;
    10317  case SECOND_VECTOR_DOUBLE_STACK:
    10318  VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
    10319  break;
    10320  default:
    10321  VMA_ASSERT(0);
    10322  }
    10323 
    10324  suballocations2nd.push_back(newSuballoc);
    10325  }
    10326  break;
    10327  default:
    10328  VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
    10329  }
    10330 
    10331  m_SumFreeSize -= newSuballoc.size;
    10332 }
    10333 
    10334 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
    10335 {
    10336  FreeAtOffset(allocation->GetOffset());
    10337 }
    10338 
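// Frees in O(1) when the allocation is the first item of 1st or the last item of either
// vector; otherwise falls back to a binary search (offsets ascending in 1st, ascending in 2nd
// for ring-buffer mode, descending for double-stack mode) and marks the item as a null item.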
    10339 void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
    10340 {
    10341  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    10342  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    10343 
    10344  if(!suballocations1st.empty())
    10345  {
    10346  // If it is the first remaining allocation in 1st vector, mark it as another null item at the beginning.
    10347  VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
    10348  if(firstSuballoc.offset == offset)
    10349  {
    10350  firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    10351  firstSuballoc.hAllocation = VK_NULL_HANDLE;
    10352  m_SumFreeSize += firstSuballoc.size;
    10353  ++m_1stNullItemsBeginCount;
    10354  CleanupAfterFree();
    10355  return;
    10356  }
    10357  }
    10358 
    10359  // Last allocation in 2-part ring buffer or top of upper stack (same logic).
    10360  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
    10361  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    10362  {
    10363  VmaSuballocation& lastSuballoc = suballocations2nd.back();
    10364  if(lastSuballoc.offset == offset)
    10365  {
    10366  m_SumFreeSize += lastSuballoc.size;
    10367  suballocations2nd.pop_back();
    10368  CleanupAfterFree();
    10369  return;
    10370  }
    10371  }
    10372  // Last allocation in 1st vector.
    10373  else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
    10374  {
    10375  VmaSuballocation& lastSuballoc = suballocations1st.back();
    10376  if(lastSuballoc.offset == offset)
    10377  {
    10378  m_SumFreeSize += lastSuballoc.size;
    10379  suballocations1st.pop_back();
    10380  CleanupAfterFree();
    10381  return;
    10382  }
    10383  }
    10384 
    10385  // Item from the middle of 1st vector.
    10386  {
    10387  VmaSuballocation refSuballoc;
    10388  refSuballoc.offset = offset;
    10389  // Rest of members stays uninitialized intentionally for better performance.
    10390  SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
    10391  suballocations1st.begin() + m_1stNullItemsBeginCount,
    10392  suballocations1st.end(),
    10393  refSuballoc);
    10394  if(it != suballocations1st.end())
    10395  {
    10396  it->type = VMA_SUBALLOCATION_TYPE_FREE;
    10397  it->hAllocation = VK_NULL_HANDLE;
    10398  ++m_1stNullItemsMiddleCount;
    10399  m_SumFreeSize += it->size;
    10400  CleanupAfterFree();
    10401  return;
    10402  }
    10403  }
    10404 
    10405  if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
    10406  {
    10407  // Item from the middle of 2nd vector.
    10408  VmaSuballocation refSuballoc;
    10409  refSuballoc.offset = offset;
    10410  // Rest of members stays uninitialized intentionally for better performance.
    10411  SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
    10412  VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
    10413  VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
    10414  if(it != suballocations2nd.end())
    10415  {
    10416  it->type = VMA_SUBALLOCATION_TYPE_FREE;
    10417  it->hAllocation = VK_NULL_HANDLE;
    10418  ++m_2ndNullItemsCount;
    10419  m_SumFreeSize += it->size;
    10420  CleanupAfterFree();
    10421  return;
    10422  }
    10423  }
    10424 
    10425  VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
    10426 }
    10427 
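// Heuristic: compact the 1st vector only once it holds more than 32 items and null items
// outnumber live items by at least 3:2, i.e. nullItemCount >= 1.5 * liveItemCount.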
    10428 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
    10429 {
    10430  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
    10431  const size_t suballocCount = AccessSuballocations1st().size();
    10432  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
    10433 }
    10434 
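// Normalizes the metadata after any free: trims null items from both ends of both vectors,
// compacts the 1st vector when ShouldCompact1st() triggers, and when the 1st vector drains
// while the 2nd acts as a ring buffer, swaps the roles of the two vectors.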
    10435 void VmaBlockMetadata_Linear::CleanupAfterFree()
    10436 {
    10437  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    10438  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    10439 
    10440  if(IsEmpty())
    10441  {
    10442  suballocations1st.clear();
    10443  suballocations2nd.clear();
    10444  m_1stNullItemsBeginCount = 0;
    10445  m_1stNullItemsMiddleCount = 0;
    10446  m_2ndNullItemsCount = 0;
    10447  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    10448  }
    10449  else
    10450  {
    10451  const size_t suballoc1stCount = suballocations1st.size();
    10452  const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
    10453  VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
    10454 
    10455  // Find more null items at the beginning of 1st vector.
    10456  while(m_1stNullItemsBeginCount < suballoc1stCount &&
    10457  suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
    10458  {
    10459  ++m_1stNullItemsBeginCount;
    10460  --m_1stNullItemsMiddleCount;
    10461  }
    10462 
    10463  // Find more null items at the end of 1st vector.
    10464  while(m_1stNullItemsMiddleCount > 0 &&
    10465  suballocations1st.back().hAllocation == VK_NULL_HANDLE)
    10466  {
    10467  --m_1stNullItemsMiddleCount;
    10468  suballocations1st.pop_back();
    10469  }
    10470 
    10471  // Find more null items at the end of 2nd vector.
    10472  while(m_2ndNullItemsCount > 0 &&
    10473  suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
    10474  {
    10475  --m_2ndNullItemsCount;
    10476  suballocations2nd.pop_back();
    10477  }
    10478 
    10479  // Find more null items at the beginning of 2nd vector.
    10480  while(m_2ndNullItemsCount > 0 &&
    10481  suballocations2nd[0].hAllocation == VK_NULL_HANDLE)
    10482  {
    10483  --m_2ndNullItemsCount;
    10484  suballocations2nd.remove(0);
    10485  }
    10486 
    10487  if(ShouldCompact1st())
    10488  {
    10489  const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
    10490  size_t srcIndex = m_1stNullItemsBeginCount;
    10491  for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
    10492  {
    10493  while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
    10494  {
    10495  ++srcIndex;
    10496  }
    10497  if(dstIndex != srcIndex)
    10498  {
    10499  suballocations1st[dstIndex] = suballocations1st[srcIndex];
    10500  }
    10501  ++srcIndex;
    10502  }
    10503  suballocations1st.resize(nonNullItemCount);
    10504  m_1stNullItemsBeginCount = 0;
    10505  m_1stNullItemsMiddleCount = 0;
    10506  }
    10507 
    10508  // 2nd vector became empty.
    10509  if(suballocations2nd.empty())
    10510  {
    10511  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    10512  }
    10513 
    10514  // 1st vector became empty.
    10515  if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
    10516  {
    10517  suballocations1st.clear();
    10518  m_1stNullItemsBeginCount = 0;
    10519 
    10520  if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    10521  {
    10522  // Swap 1st with 2nd. Now 2nd is empty.
    10523  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    10524  m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
    10525  while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
    10526  suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
    10527  {
    10528  ++m_1stNullItemsBeginCount;
    10529  --m_1stNullItemsMiddleCount;
    10530  }
    10531  m_2ndNullItemsCount = 0;
    10532  m_1stVectorIndex ^= 1;
    10533  }
    10534  }
    10535  }
    10536 
    10537  VMA_HEAVY_ASSERT(Validate());
    10538 }
    10539 
    10540 
    10541 ////////////////////////////////////////////////////////////////////////////////
    10542 // class VmaBlockMetadata_Buddy
    10543 
    10544 VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
    10545  VmaBlockMetadata(hAllocator),
    10546  m_Root(VMA_NULL),
    10547  m_AllocationCount(0),
    10548  m_FreeCount(1),
    10549  m_SumFreeSize(0)
    10550 {
    10551  memset(m_FreeList, 0, sizeof(m_FreeList));
    10552 }
    10553 
    10554 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
    10555 {
    10556  DeleteNode(m_Root);
    10557 }
    10558 
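// The buddy allocator can only manage a power-of-two size, so the usable size is rounded
// down with VmaPrevPow2 (the remainder is reported as unusable) and split into levels while
// nodes stay at least MIN_NODE_SIZE; a single free root node initially spans level 0.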
    10559 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
    10560 {
    10561  VmaBlockMetadata::Init(size);
    10562 
    10563  m_UsableSize = VmaPrevPow2(size);
    10564  m_SumFreeSize = m_UsableSize;
    10565 
    10566  // Calculate m_LevelCount.
    10567  m_LevelCount = 1;
    10568  while(m_LevelCount < MAX_LEVELS &&
    10569  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
    10570  {
    10571  ++m_LevelCount;
    10572  }
    10573 
    10574  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
    10575  rootNode->offset = 0;
    10576  rootNode->type = Node::TYPE_FREE;
    10577  rootNode->parent = VMA_NULL;
    10578  rootNode->buddy = VMA_NULL;
    10579 
    10580  m_Root = rootNode;
    10581  AddToFreeListFront(0, rootNode);
    10582 }
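// Summary of the buddy metadata layout (derived from the code above): the
// usable size is the block size rounded down to a power of 2 (VmaPrevPow2);
// the remainder is reported as "unusable" space in statistics. For example, a
// 10 MiB block yields an 8 MiB buddy tree plus 2 MiB of unusable space. Each
// level halves the node size, down to MIN_NODE_SIZE, which bounds m_LevelCount.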
    10583 
    10584 bool VmaBlockMetadata_Buddy::Validate() const
    10585 {
    10586  // Validate tree.
    10587  ValidationContext ctx;
    10588  if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
    10589  {
    10590  VMA_VALIDATE(false && "ValidateNode failed.");
    10591  }
    10592  VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
    10593  VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
    10594 
    10595  // Validate free node lists.
    10596  for(uint32_t level = 0; level < m_LevelCount; ++level)
    10597  {
    10598  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
    10599  m_FreeList[level].front->free.prev == VMA_NULL);
    10600 
    10601  for(Node* node = m_FreeList[level].front;
    10602  node != VMA_NULL;
    10603  node = node->free.next)
    10604  {
    10605  VMA_VALIDATE(node->type == Node::TYPE_FREE);
    10606 
    10607  if(node->free.next == VMA_NULL)
    10608  {
    10609  VMA_VALIDATE(m_FreeList[level].back == node);
    10610  }
    10611  else
    10612  {
    10613  VMA_VALIDATE(node->free.next->free.prev == node);
    10614  }
    10615  }
    10616  }
    10617 
    10618  // Validate that free lists at higher levels are empty.
    10619  for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
    10620  {
    10621  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
    10622  }
    10623 
    10624  return true;
    10625 }
    10626 
    10627 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
    10628 {
    10629  for(uint32_t level = 0; level < m_LevelCount; ++level)
    10630  {
    10631  if(m_FreeList[level].front != VMA_NULL)
    10632  {
    10633  return LevelToNodeSize(level);
    10634  }
    10635  }
    10636  return 0;
    10637 }
    10638 
    10639 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    10640 {
    10641  const VkDeviceSize unusableSize = GetUnusableSize();
    10642 
    10643  outInfo.blockCount = 1;
    10644 
    10645  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
    10646  outInfo.usedBytes = outInfo.unusedBytes = 0;
    10647 
    10648  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
    10649  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
    10650  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
    10651 
    10652  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
    10653 
    10654  if(unusableSize > 0)
    10655  {
    10656  ++outInfo.unusedRangeCount;
    10657  outInfo.unusedBytes += unusableSize;
    10658  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
    10659  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
    10660  }
    10661 }
    10662 
    10663 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
    10664 {
    10665  const VkDeviceSize unusableSize = GetUnusableSize();
    10666 
    10667  inoutStats.size += GetSize();
    10668  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
    10669  inoutStats.allocationCount += m_AllocationCount;
    10670  inoutStats.unusedRangeCount += m_FreeCount;
    10671  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    10672 
    10673  if(unusableSize > 0)
    10674  {
    10675  ++inoutStats.unusedRangeCount;
    10676  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
    10677  }
    10678 }
    10679 
    10680 #if VMA_STATS_STRING_ENABLED
    10681 
    10682 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
    10683 {
    10684  // TODO optimize
    10685  VmaStatInfo stat;
    10686  CalcAllocationStatInfo(stat);
    10687 
    10688  PrintDetailedMap_Begin(
    10689  json,
    10690  stat.unusedBytes,
    10691  stat.allocationCount,
    10692  stat.unusedRangeCount);
    10693 
    10694  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
    10695 
    10696  const VkDeviceSize unusableSize = GetUnusableSize();
    10697  if(unusableSize > 0)
    10698  {
    10699  PrintDetailedMap_UnusedRange(json,
    10700  m_UsableSize, // offset
    10701  unusableSize); // size
    10702  }
    10703 
    10704  PrintDetailedMap_End(json);
    10705 }
    10706 
    10707 #endif // #if VMA_STATS_STRING_ENABLED
    10708 
    10709 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
    10710  uint32_t currentFrameIndex,
    10711  uint32_t frameInUseCount,
    10712  VkDeviceSize bufferImageGranularity,
    10713  VkDeviceSize allocSize,
    10714  VkDeviceSize allocAlignment,
    10715  bool upperAddress,
    10716  VmaSuballocationType allocType,
    10717  bool canMakeOtherLost,
    10718  uint32_t strategy,
    10719  VmaAllocationRequest* pAllocationRequest)
    10720 {
    10721  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
    10722 
    10723  // Simple way to respect bufferImageGranularity. May be optimized some day.
    10724  // Whenever it might be an OPTIMAL image...
    10725  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
    10726  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    10727  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
    10728  {
    10729  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
    10730  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
    10731  }
    10732 
    10733  if(allocSize > m_UsableSize)
    10734  {
    10735  return false;
    10736  }
    10737 
    10738  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    10739  for(uint32_t level = targetLevel + 1; level--; )
    10740  {
    10741  for(Node* freeNode = m_FreeList[level].front;
    10742  freeNode != VMA_NULL;
    10743  freeNode = freeNode->free.next)
    10744  {
    10745  if(freeNode->offset % allocAlignment == 0)
    10746  {
    10747  pAllocationRequest->type = VmaAllocationRequestType::Normal;
    10748  pAllocationRequest->offset = freeNode->offset;
    10749  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
    10750  pAllocationRequest->sumItemSize = 0;
    10751  pAllocationRequest->itemsToMakeLostCount = 0;
    10752  pAllocationRequest->customData = (void*)(uintptr_t)level;
    10753  return true;
    10754  }
    10755  }
    10756  }
    10757 
    10758  return false;
    10759 }
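// Search order note: targetLevel is the deepest level whose node size still
// fits allocSize, so the loop above tries an exact-fit free node first and
// then progressively larger nodes as level decreases toward the root. The
// chosen level is passed to Alloc() through pAllocationRequest->customData.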
    10760 
    10761 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
    10762  uint32_t currentFrameIndex,
    10763  uint32_t frameInUseCount,
    10764  VmaAllocationRequest* pAllocationRequest)
    10765 {
    10766  /*
    10767  Lost allocations are not supported in buddy allocator at the moment.
    10768  Support might be added in the future.
    10769  */
    10770  return pAllocationRequest->itemsToMakeLostCount == 0;
    10771 }
    10772 
    10773 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    10774 {
    10775  /*
    10776  Lost allocations are not supported in buddy allocator at the moment.
    10777  Support might be added in the future.
    10778  */
    10779  return 0;
    10780 }
    10781 
    10782 void VmaBlockMetadata_Buddy::Alloc(
    10783  const VmaAllocationRequest& request,
    10784  VmaSuballocationType type,
    10785  VkDeviceSize allocSize,
    10786  VmaAllocation hAllocation)
    10787 {
    10788  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
    10789 
    10790  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    10791  uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
    10792 
    10793  Node* currNode = m_FreeList[currLevel].front;
    10794  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    10795  while(currNode->offset != request.offset)
    10796  {
    10797  currNode = currNode->free.next;
    10798  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    10799  }
    10800 
    10801  // Go down, splitting free nodes.
    10802  while(currLevel < targetLevel)
    10803  {
    10804  // currNode is already first free node at currLevel.
    10805  // Remove it from list of free nodes at this currLevel.
    10806  RemoveFromFreeList(currLevel, currNode);
    10807 
    10808  const uint32_t childrenLevel = currLevel + 1;
    10809 
    10810  // Create two free sub-nodes.
    10811  Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
    10812  Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();
    10813 
    10814  leftChild->offset = currNode->offset;
    10815  leftChild->type = Node::TYPE_FREE;
    10816  leftChild->parent = currNode;
    10817  leftChild->buddy = rightChild;
    10818 
    10819  rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
    10820  rightChild->type = Node::TYPE_FREE;
    10821  rightChild->parent = currNode;
    10822  rightChild->buddy = leftChild;
    10823 
    10824  // Convert current currNode to split type.
    10825  currNode->type = Node::TYPE_SPLIT;
    10826  currNode->split.leftChild = leftChild;
    10827 
    10828  // Add child nodes to free list. Order is important!
    10829  AddToFreeListFront(childrenLevel, rightChild);
    10830  AddToFreeListFront(childrenLevel, leftChild);
    10831 
    10832  ++m_FreeCount;
    10833  //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
    10834  ++currLevel;
    10835  currNode = m_FreeList[currLevel].front;
    10836 
    10837  /*
    10838  We can be sure that currNode, as left child of node previously split,
    10839  also fulfills the alignment requirement.
    10840  */
    10841  }
    10842 
    10843  // Remove from free list.
    10844  VMA_ASSERT(currLevel == targetLevel &&
    10845  currNode != VMA_NULL &&
    10846  currNode->type == Node::TYPE_FREE);
    10847  RemoveFromFreeList(currLevel, currNode);
    10848 
    10849  // Convert to allocation node.
    10850  currNode->type = Node::TYPE_ALLOCATION;
    10851  currNode->allocation.alloc = hAllocation;
    10852 
    10853  ++m_AllocationCount;
    10854  --m_FreeCount;
    10855  m_SumFreeSize -= allocSize;
    10856 }
    10857 
    10858 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
    10859 {
    10860  if(node->type == Node::TYPE_SPLIT)
    10861  {
    10862  DeleteNode(node->split.leftChild->buddy);
    10863  DeleteNode(node->split.leftChild);
    10864  }
    10865 
    10866  vma_delete(GetAllocationCallbacks(), node);
    10867 }
    10868 
    10869 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
    10870 {
    10871  VMA_VALIDATE(level < m_LevelCount);
    10872  VMA_VALIDATE(curr->parent == parent);
    10873  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
    10874  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
    10875  switch(curr->type)
    10876  {
    10877  case Node::TYPE_FREE:
    10878  // curr->free.prev, next are validated separately.
    10879  ctx.calculatedSumFreeSize += levelNodeSize;
    10880  ++ctx.calculatedFreeCount;
    10881  break;
    10882  case Node::TYPE_ALLOCATION:
    10883  ++ctx.calculatedAllocationCount;
    10884  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
    10885  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
    10886  break;
    10887  case Node::TYPE_SPLIT:
    10888  {
    10889  const uint32_t childrenLevel = level + 1;
    10890  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
    10891  const Node* const leftChild = curr->split.leftChild;
    10892  VMA_VALIDATE(leftChild != VMA_NULL);
    10893  VMA_VALIDATE(leftChild->offset == curr->offset);
    10894  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
    10895  {
    10896  VMA_VALIDATE(false && "ValidateNode for left child failed.");
    10897  }
    10898  const Node* const rightChild = leftChild->buddy;
    10899  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
    10900  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
    10901  {
    10902  VMA_VALIDATE(false && "ValidateNode for right child failed.");
    10903  }
    10904  }
    10905  break;
    10906  default:
    10907  return false;
    10908  }
    10909 
    10910  return true;
    10911 }
    10912 
    10913 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
    10914 {
    10915  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
    10916  uint32_t level = 0;
    10917  VkDeviceSize currLevelNodeSize = m_UsableSize;
    10918  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
    10919  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
    10920  {
    10921  ++level;
    10922  currLevelNodeSize = nextLevelNodeSize;
    10923  nextLevelNodeSize = currLevelNodeSize >> 1;
    10924  }
    10925  return level;
    10926 }
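// Worked example: with m_UsableSize = 256 and allocSize = 48, the loop visits
// node sizes 256 -> 128 -> 64 and stops because the next size (32) would be
// too small, returning level 2 (node size 64). Allocations are thus rounded
// up to the enclosing power-of-2 node, which is the source of the buddy
// allocator's internal fragmentation.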
    10927 
    10928 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
    10929 {
    10930  // Find node and level.
    10931  Node* node = m_Root;
    10932  VkDeviceSize nodeOffset = 0;
    10933  uint32_t level = 0;
    10934  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
    10935  while(node->type == Node::TYPE_SPLIT)
    10936  {
    10937  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
    10938  if(offset < nodeOffset + nextLevelSize)
    10939  {
    10940  node = node->split.leftChild;
    10941  }
    10942  else
    10943  {
    10944  node = node->split.leftChild->buddy;
    10945  nodeOffset += nextLevelSize;
    10946  }
    10947  ++level;
    10948  levelNodeSize = nextLevelSize;
    10949  }
    10950 
    10951  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
    10952  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
    10953 
    10954  ++m_FreeCount;
    10955  --m_AllocationCount;
    10956  m_SumFreeSize += alloc->GetSize();
    10957 
    10958  node->type = Node::TYPE_FREE;
    10959 
    10960  // Join free nodes if possible.
    10961  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
    10962  {
    10963  RemoveFromFreeList(level, node->buddy);
    10964  Node* const parent = node->parent;
    10965 
    10966  vma_delete(GetAllocationCallbacks(), node->buddy);
    10967  vma_delete(GetAllocationCallbacks(), node);
    10968  parent->type = Node::TYPE_FREE;
    10969 
    10970  node = parent;
    10971  --level;
    10972  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
    10973  --m_FreeCount;
    10974  }
    10975 
    10976  AddToFreeListFront(level, node);
    10977 }
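// Free path note: the tree is walked from the root by offset (the left child
// covers the lower half of each range, the right child the upper half), then
// the freed node is merged with its buddy for as long as both halves of a
// pair are free, restoring larger nodes for future allocations.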
    10978 
    10979 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
    10980 {
    10981  switch(node->type)
    10982  {
    10983  case Node::TYPE_FREE:
    10984  ++outInfo.unusedRangeCount;
    10985  outInfo.unusedBytes += levelNodeSize;
    10986  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
    10987  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, levelNodeSize);
    10988  break;
    10989  case Node::TYPE_ALLOCATION:
    10990  {
    10991  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
    10992  ++outInfo.allocationCount;
    10993  outInfo.usedBytes += allocSize;
    10994  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
    10995  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, allocSize);
    10996 
    10997  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
    10998  if(unusedRangeSize > 0)
    10999  {
    11000  ++outInfo.unusedRangeCount;
    11001  outInfo.unusedBytes += unusedRangeSize;
    11002  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    11003  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    11004  }
    11005  }
    11006  break;
    11007  case Node::TYPE_SPLIT:
    11008  {
    11009  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
    11010  const Node* const leftChild = node->split.leftChild;
    11011  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
    11012  const Node* const rightChild = leftChild->buddy;
    11013  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
    11014  }
    11015  break;
    11016  default:
    11017  VMA_ASSERT(0);
    11018  }
    11019 }
    11020 
    11021 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
    11022 {
    11023  VMA_ASSERT(node->type == Node::TYPE_FREE);
    11024 
    11025  // List is empty.
    11026  Node* const frontNode = m_FreeList[level].front;
    11027  if(frontNode == VMA_NULL)
    11028  {
    11029  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
    11030  node->free.prev = node->free.next = VMA_NULL;
    11031  m_FreeList[level].front = m_FreeList[level].back = node;
    11032  }
    11033  else
    11034  {
    11035  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
    11036  node->free.prev = VMA_NULL;
    11037  node->free.next = frontNode;
    11038  frontNode->free.prev = node;
    11039  m_FreeList[level].front = node;
    11040  }
    11041 }
    11042 
    11043 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
    11044 {
    11045  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
    11046 
    11047  // It is at the front.
    11048  if(node->free.prev == VMA_NULL)
    11049  {
    11050  VMA_ASSERT(m_FreeList[level].front == node);
    11051  m_FreeList[level].front = node->free.next;
    11052  }
    11053  else
    11054  {
    11055  Node* const prevFreeNode = node->free.prev;
    11056  VMA_ASSERT(prevFreeNode->free.next == node);
    11057  prevFreeNode->free.next = node->free.next;
    11058  }
    11059 
    11060  // It is at the back.
    11061  if(node->free.next == VMA_NULL)
    11062  {
    11063  VMA_ASSERT(m_FreeList[level].back == node);
    11064  m_FreeList[level].back = node->free.prev;
    11065  }
    11066  else
    11067  {
    11068  Node* const nextFreeNode = node->free.next;
    11069  VMA_ASSERT(nextFreeNode->free.prev == node);
    11070  nextFreeNode->free.prev = node->free.prev;
    11071  }
    11072 }
    11073 
    11074 #if VMA_STATS_STRING_ENABLED
    11075 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
    11076 {
    11077  switch(node->type)
    11078  {
    11079  case Node::TYPE_FREE:
    11080  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
    11081  break;
    11082  case Node::TYPE_ALLOCATION:
    11083  {
    11084  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
    11085  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
    11086  if(allocSize < levelNodeSize)
    11087  {
    11088  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
    11089  }
    11090  }
    11091  break;
    11092  case Node::TYPE_SPLIT:
    11093  {
    11094  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
    11095  const Node* const leftChild = node->split.leftChild;
    11096  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
    11097  const Node* const rightChild = leftChild->buddy;
    11098  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
    11099  }
    11100  break;
    11101  default:
    11102  VMA_ASSERT(0);
    11103  }
    11104 }
    11105 #endif // #if VMA_STATS_STRING_ENABLED
    11106 
    11107 
    11108 ////////////////////////////////////////////////////////////////////////////////
    11109 // class VmaDeviceMemoryBlock
    11110 
    11111 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    11112  m_pMetadata(VMA_NULL),
    11113  m_MemoryTypeIndex(UINT32_MAX),
    11114  m_Id(0),
    11115  m_hMemory(VK_NULL_HANDLE),
    11116  m_MapCount(0),
    11117  m_pMappedData(VMA_NULL)
    11118 {
    11119 }
    11120 
    11121 void VmaDeviceMemoryBlock::Init(
    11122  VmaAllocator hAllocator,
    11123  VmaPool hParentPool,
    11124  uint32_t newMemoryTypeIndex,
    11125  VkDeviceMemory newMemory,
    11126  VkDeviceSize newSize,
    11127  uint32_t id,
    11128  uint32_t algorithm)
    11129 {
    11130  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    11131 
    11132  m_hParentPool = hParentPool;
    11133  m_MemoryTypeIndex = newMemoryTypeIndex;
    11134  m_Id = id;
    11135  m_hMemory = newMemory;
    11136 
    11137  switch(algorithm)
    11138  {
    11139  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
    11140  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
    11141  break;
    11142  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
    11143  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
    11144  break;
    11145  default:
    11146  VMA_ASSERT(0);
    11147  // Fall-through.
    11148  case 0:
    11149  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
    11150  }
    11151  m_pMetadata->Init(newSize);
    11152 }
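// The algorithm value is taken from VmaPoolCreateInfo::flags masked with
// VMA_POOL_CREATE_ALGORITHM_MASK; 0 selects the default generic metadata.
// Illustrative application-side usage (a sketch, not part of this file),
// assuming memTypeIndex was chosen earlier: a pool created with the buddy
// algorithm routes its blocks to VmaBlockMetadata_Buddy:
//
//   VmaPoolCreateInfo poolCreateInfo = {};
//   poolCreateInfo.memoryTypeIndex = memTypeIndex;
//   poolCreateInfo.flags = VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT;
//   VmaPool pool;
//   vmaCreatePool(allocator, &poolCreateInfo, &pool);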
    11153 
    11154 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
    11155 {
    11156  // This is the most important assert in the entire library.
    11157  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
    11158  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
    11159 
    11160  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    11161  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
    11162  m_hMemory = VK_NULL_HANDLE;
    11163 
    11164  vma_delete(allocator, m_pMetadata);
    11165  m_pMetadata = VMA_NULL;
    11166 }
    11167 
    11168 bool VmaDeviceMemoryBlock::Validate() const
    11169 {
    11170  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
    11171  (m_pMetadata->GetSize() != 0));
    11172 
    11173  return m_pMetadata->Validate();
    11174 }
    11175 
    11176 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
    11177 {
    11178  void* pData = nullptr;
    11179  VkResult res = Map(hAllocator, 1, &pData);
    11180  if(res != VK_SUCCESS)
    11181  {
    11182  return res;
    11183  }
    11184 
    11185  res = m_pMetadata->CheckCorruption(pData);
    11186 
    11187  Unmap(hAllocator, 1);
    11188 
    11189  return res;
    11190 }
    11191 
    11192 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
    11193 {
    11194  if(count == 0)
    11195  {
    11196  return VK_SUCCESS;
    11197  }
    11198 
    11199  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    11200  if(m_MapCount != 0)
    11201  {
    11202  m_MapCount += count;
    11203  VMA_ASSERT(m_pMappedData != VMA_NULL);
    11204  if(ppData != VMA_NULL)
    11205  {
    11206  *ppData = m_pMappedData;
    11207  }
    11208  return VK_SUCCESS;
    11209  }
    11210  else
    11211  {
    11212  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
    11213  hAllocator->m_hDevice,
    11214  m_hMemory,
    11215  0, // offset
    11216  VK_WHOLE_SIZE,
    11217  0, // flags
    11218  &m_pMappedData);
    11219  if(result == VK_SUCCESS)
    11220  {
    11221  if(ppData != VMA_NULL)
    11222  {
    11223  *ppData = m_pMappedData;
    11224  }
    11225  m_MapCount = count;
    11226  }
    11227  return result;
    11228  }
    11229 }
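// Mapping is reference-counted per block: the first Map() calls vkMapMemory
// once on the whole VkDeviceMemory (VK_WHOLE_SIZE) and subsequent calls only
// increase m_MapCount, so persistently mapped allocations from the same block
// share a single mapping. Unmap() decrements the counter and calls
// vkUnmapMemory only when it reaches zero.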
    11230 
    11231 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
    11232 {
    11233  if(count == 0)
    11234  {
    11235  return;
    11236  }
    11237 
    11238  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    11239  if(m_MapCount >= count)
    11240  {
    11241  m_MapCount -= count;
    11242  if(m_MapCount == 0)
    11243  {
    11244  m_pMappedData = VMA_NULL;
    11245  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
    11246  }
    11247  }
    11248  else
    11249  {
    11250  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
    11251  }
    11252 }
    11253 
    11254 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
    11255 {
    11256  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    11257  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
    11258 
    11259  void* pData;
    11260  VkResult res = Map(hAllocator, 1, &pData);
    11261  if(res != VK_SUCCESS)
    11262  {
    11263  return res;
    11264  }
    11265 
    11266  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
    11267  VmaWriteMagicValue(pData, allocOffset + allocSize);
    11268 
    11269  Unmap(hAllocator, 1);
    11270 
    11271  return VK_SUCCESS;
    11272 }
    11273 
    11274 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
    11275 {
    11276  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    11277  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
    11278 
    11279  void* pData;
    11280  VkResult res = Map(hAllocator, 1, &pData);
    11281  if(res != VK_SUCCESS)
    11282  {
    11283  return res;
    11284  }
    11285 
    11286  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
    11287  {
    11288  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
    11289  }
    11290  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
    11291  {
    11292  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
    11293  }
    11294 
    11295  Unmap(hAllocator, 1);
    11296 
    11297  return VK_SUCCESS;
    11298 }
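// Together, WriteMagicValueAroundAllocation and ValidateMagicValueAroundAllocation
// implement margin-based corruption detection: with VMA_DEBUG_MARGIN > 0 and
// VMA_DEBUG_DETECT_CORRUPTION enabled (compile-time macros defined before
// including this header), a magic value fills the margins around each
// allocation and is verified on free, turning out-of-bounds writes into asserts.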
    11299 
    11300 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
    11301  const VmaAllocator hAllocator,
    11302  const VmaAllocation hAllocation,
    11303  VkBuffer hBuffer)
    11304 {
    11305  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
    11306  hAllocation->GetBlock() == this);
    11307  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    11308  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    11309  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
    11310  hAllocator->m_hDevice,
    11311  hBuffer,
    11312  m_hMemory,
    11313  hAllocation->GetOffset());
    11314 }
    11315 
    11316 VkResult VmaDeviceMemoryBlock::BindImageMemory(
    11317  const VmaAllocator hAllocator,
    11318  const VmaAllocation hAllocation,
    11319  VkImage hImage)
    11320 {
    11321  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
    11322  hAllocation->GetBlock() == this);
    11323  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    11324  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    11325  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
    11326  hAllocator->m_hDevice,
    11327  hImage,
    11328  m_hMemory,
    11329  hAllocation->GetOffset());
    11330 }
    11331 
    11332 static void InitStatInfo(VmaStatInfo& outInfo)
    11333 {
    11334  memset(&outInfo, 0, sizeof(outInfo));
    11335  outInfo.allocationSizeMin = UINT64_MAX;
    11336  outInfo.unusedRangeSizeMin = UINT64_MAX;
    11337 }
    11338 
    11339 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
    11340 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
    11341 {
    11342  inoutInfo.blockCount += srcInfo.blockCount;
    11343  inoutInfo.allocationCount += srcInfo.allocationCount;
    11344  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
    11345  inoutInfo.usedBytes += srcInfo.usedBytes;
    11346  inoutInfo.unusedBytes += srcInfo.unusedBytes;
    11347  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
    11348  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
    11349  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
    11350  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
    11351 }
    11352 
    11353 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
    11354 {
    11355  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
    11356  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
    11357  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
    11358  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
    11359 }
    11360 
    11361 VmaPool_T::VmaPool_T(
    11362  VmaAllocator hAllocator,
    11363  const VmaPoolCreateInfo& createInfo,
    11364  VkDeviceSize preferredBlockSize) :
    11365  m_BlockVector(
    11366  hAllocator,
    11367  this, // hParentPool
    11368  createInfo.memoryTypeIndex,
    11369  createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
    11370  createInfo.minBlockCount,
    11371  createInfo.maxBlockCount,
    11372  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
    11373  createInfo.frameInUseCount,
    11374  true, // isCustomPool
    11375  createInfo.blockSize != 0, // explicitBlockSize
    11376  createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
    11377  m_Id(0)
    11378 {
    11379 }
    11380 
    11381 VmaPool_T::~VmaPool_T()
    11382 {
    11383 }
    11384 
    11385 #if VMA_STATS_STRING_ENABLED
    11386 
    11387 #endif // #if VMA_STATS_STRING_ENABLED
    11388 
    11389 VmaBlockVector::VmaBlockVector(
    11390  VmaAllocator hAllocator,
    11391  VmaPool hParentPool,
    11392  uint32_t memoryTypeIndex,
    11393  VkDeviceSize preferredBlockSize,
    11394  size_t minBlockCount,
    11395  size_t maxBlockCount,
    11396  VkDeviceSize bufferImageGranularity,
    11397  uint32_t frameInUseCount,
    11398  bool isCustomPool,
    11399  bool explicitBlockSize,
    11400  uint32_t algorithm) :
    11401  m_hAllocator(hAllocator),
    11402  m_hParentPool(hParentPool),
    11403  m_MemoryTypeIndex(memoryTypeIndex),
    11404  m_PreferredBlockSize(preferredBlockSize),
    11405  m_MinBlockCount(minBlockCount),
    11406  m_MaxBlockCount(maxBlockCount),
    11407  m_BufferImageGranularity(bufferImageGranularity),
    11408  m_FrameInUseCount(frameInUseCount),
    11409  m_IsCustomPool(isCustomPool),
    11410  m_ExplicitBlockSize(explicitBlockSize),
    11411  m_Algorithm(algorithm),
    11412  m_HasEmptyBlock(false),
    11413  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    11414  m_NextBlockId(0)
    11415 {
    11416 }
    11417 
    11418 VmaBlockVector::~VmaBlockVector()
    11419 {
    11420  for(size_t i = m_Blocks.size(); i--; )
    11421  {
    11422  m_Blocks[i]->Destroy(m_hAllocator);
    11423  vma_delete(m_hAllocator, m_Blocks[i]);
    11424  }
    11425 }
    11426 
    11427 VkResult VmaBlockVector::CreateMinBlocks()
    11428 {
    11429  for(size_t i = 0; i < m_MinBlockCount; ++i)
    11430  {
    11431  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
    11432  if(res != VK_SUCCESS)
    11433  {
    11434  return res;
    11435  }
    11436  }
    11437  return VK_SUCCESS;
    11438 }
    11439 
    11440 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
    11441 {
    11442  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
    11443 
    11444  const size_t blockCount = m_Blocks.size();
    11445 
    11446  pStats->size = 0;
    11447  pStats->unusedSize = 0;
    11448  pStats->allocationCount = 0;
    11449  pStats->unusedRangeCount = 0;
    11450  pStats->unusedRangeSizeMax = 0;
    11451  pStats->blockCount = blockCount;
    11452 
    11453  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    11454  {
    11455  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    11456  VMA_ASSERT(pBlock);
    11457  VMA_HEAVY_ASSERT(pBlock->Validate());
    11458  pBlock->m_pMetadata->AddPoolStats(*pStats);
    11459  }
    11460 }
    11461 
    11462 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
    11463 {
    11464  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    11465  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
    11466  (VMA_DEBUG_MARGIN > 0) &&
    11467  (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) &&
    11468  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
    11469 }
    11470 
    11471 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
    11472 
    11473 VkResult VmaBlockVector::Allocate(
    11474  uint32_t currentFrameIndex,
    11475  VkDeviceSize size,
    11476  VkDeviceSize alignment,
    11477  const VmaAllocationCreateInfo& createInfo,
    11478  VmaSuballocationType suballocType,
    11479  size_t allocationCount,
    11480  VmaAllocation* pAllocations)
    11481 {
    11482  size_t allocIndex;
    11483  VkResult res = VK_SUCCESS;
    11484 
    11485  if(IsCorruptionDetectionEnabled())
    11486  {
    11487  size = VmaAlignUp<VkDeviceSize>(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
    11488  alignment = VmaAlignUp<VkDeviceSize>(alignment, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
    11489  }
    11490 
    11491  {
    11492  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
    11493  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    11494  {
    11495  res = AllocatePage(
    11496  currentFrameIndex,
    11497  size,
    11498  alignment,
    11499  createInfo,
    11500  suballocType,
    11501  pAllocations + allocIndex);
    11502  if(res != VK_SUCCESS)
    11503  {
    11504  break;
    11505  }
    11506  }
    11507  }
    11508 
    11509  if(res != VK_SUCCESS)
    11510  {
    11511  // Free all already created allocations.
    11512  while(allocIndex--)
    11513  {
    11514  Free(pAllocations[allocIndex]);
    11515  }
    11516  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
    11517  }
    11518 
    11519  return res;
    11520 }
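// Note: Allocate() is all-or-nothing for multi-allocation requests - if any
// page fails, the pages created so far are freed and the output array is
// zeroed, so the caller never observes a partially filled result.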
    11521 
    11522 VkResult VmaBlockVector::AllocatePage(
    11523  uint32_t currentFrameIndex,
    11524  VkDeviceSize size,
    11525  VkDeviceSize alignment,
    11526  const VmaAllocationCreateInfo& createInfo,
    11527  VmaSuballocationType suballocType,
    11528  VmaAllocation* pAllocation)
    11529 {
    11530  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    11531  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
    11532  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    11533  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
    11534  const bool canCreateNewBlock =
    11535  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
    11536  (m_Blocks.size() < m_MaxBlockCount);
    11537  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
    11538 
    11539  // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer.
    11540  // Which in turn is available only when maxBlockCount = 1.
    11541  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
    11542  {
    11543  canMakeOtherLost = false;
    11544  }
    11545 
    11546  // Upper address can only be used with linear allocator and within single memory block.
    11547  if(isUpperAddress &&
    11548  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
    11549  {
    11550  return VK_ERROR_FEATURE_NOT_PRESENT;
    11551  }
    11552 
    11553  // Validate strategy.
    11554  switch(strategy)
    11555  {
    11556  case 0:
    11557  strategy = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;
    11558  break;
    11559  case VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT:
    11560  case VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT:
    11561  case VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT:
    11562  break;
    11563  default:
    11564  return VK_ERROR_FEATURE_NOT_PRESENT;
    11565  }
    11566 
    11567  // Early reject: requested allocation size is larger than the maximum block size for this block vector.
    11568  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
    11569  {
    11570  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    11571  }
    11572 
    11573  /*
    11574  Under certain conditions, this whole section can be skipped for optimization, so
    11575  we move on directly to trying to allocate with canMakeOtherLost. That's the case
    11576  e.g. for custom pools with linear algorithm.
    11577  */
    11578  if(!canMakeOtherLost || canCreateNewBlock)
    11579  {
    11580  // 1. Search existing allocations. Try to allocate without making other allocations lost.
    11581  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
    11582  allocFlagsCopy &= ~VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
    11583 
    11584  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    11585  {
    11586  // Use only last block.
    11587  if(!m_Blocks.empty())
    11588  {
    11589  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
    11590  VMA_ASSERT(pCurrBlock);
    11591  VkResult res = AllocateFromBlock(
    11592  pCurrBlock,
    11593  currentFrameIndex,
    11594  size,
    11595  alignment,
    11596  allocFlagsCopy,
    11597  createInfo.pUserData,
    11598  suballocType,
    11599  strategy,
    11600  pAllocation);
    11601  if(res == VK_SUCCESS)
    11602  {
    11603  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
    11604  return VK_SUCCESS;
    11605  }
    11606  }
    11607  }
    11608  else
    11609  {
    11610  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
    11611  {
    11612  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    11613  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    11614  {
    11615  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    11616  VMA_ASSERT(pCurrBlock);
    11617  VkResult res = AllocateFromBlock(
    11618  pCurrBlock,
    11619  currentFrameIndex,
    11620  size,
    11621  alignment,
    11622  allocFlagsCopy,
    11623  createInfo.pUserData,
    11624  suballocType,
    11625  strategy,
    11626  pAllocation);
    11627  if(res == VK_SUCCESS)
    11628  {
    11629  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
    11630  return VK_SUCCESS;
    11631  }
    11632  }
    11633  }
    11634  else // WORST_FIT, FIRST_FIT
    11635  {
    11636  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
    11637  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    11638  {
    11639  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    11640  VMA_ASSERT(pCurrBlock);
    11641  VkResult res = AllocateFromBlock(
    11642  pCurrBlock,
    11643  currentFrameIndex,
    11644  size,
    11645  alignment,
    11646  allocFlagsCopy,
    11647  createInfo.pUserData,
    11648  suballocType,
    11649  strategy,
    11650  pAllocation);
    11651  if(res == VK_SUCCESS)
    11652  {
    11653  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
    11654  return VK_SUCCESS;
    11655  }
    11656  }
    11657  }
    11658  }
    11659 
    11660  // 2. Try to create new block.
    11661  if(canCreateNewBlock)
    11662  {
    11663  // Calculate optimal size for new block.
    11664  VkDeviceSize newBlockSize = m_PreferredBlockSize;
    11665  uint32_t newBlockSizeShift = 0;
    11666  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
    11667 
    11668  if(!m_ExplicitBlockSize)
    11669  {
    11670  // Allocate 1/8, 1/4, 1/2 as first blocks.
    11671  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
    11672  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
    11673  {
    11674  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    11675  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
    11676  {
    11677  newBlockSize = smallerNewBlockSize;
    11678  ++newBlockSizeShift;
    11679  }
    11680  else
    11681  {
    11682  break;
    11683  }
    11684  }
    11685  }
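// Example of the heuristic above (illustrative numbers): with
// m_PreferredBlockSize = 256 MiB and a first 10 MiB allocation in an empty
// vector, 128, 64 and 32 MiB each exceed both the largest existing block (0)
// and 2 * size (20 MiB), so a 32 MiB block is created first; full-size blocks
// appear only as usage grows.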
    11686 
    11687  size_t newBlockIndex = 0;
    11688  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
    11689  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
    11690  if(!m_ExplicitBlockSize)
    11691  {
    11692  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
    11693  {
    11694  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    11695  if(smallerNewBlockSize >= size)
    11696  {
    11697  newBlockSize = smallerNewBlockSize;
    11698  ++newBlockSizeShift;
    11699  res = CreateBlock(newBlockSize, &newBlockIndex);
    11700  }
    11701  else
    11702  {
    11703  break;
    11704  }
    11705  }
    11706  }
    11707 
    11708  if(res == VK_SUCCESS)
    11709  {
    11710  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
    11711  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
    11712 
    11713  res = AllocateFromBlock(
    11714  pBlock,
    11715  currentFrameIndex,
    11716  size,
    11717  alignment,
    11718  allocFlagsCopy,
    11719  createInfo.pUserData,
    11720  suballocType,
    11721  strategy,
    11722  pAllocation);
    11723  if(res == VK_SUCCESS)
    11724  {
    11725  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
    11726  return VK_SUCCESS;
    11727  }
    11728  else
    11729  {
    11730  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
    11731  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    11732  }
    11733  }
    11734  }
    11735  }
    11736 
    11737  // 3. Try to allocate from existing blocks with making other allocations lost.
    11738  if(canMakeOtherLost)
    11739  {
    11740  uint32_t tryIndex = 0;
    11741  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
    11742  {
    11743  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
    11744  VmaAllocationRequest bestRequest = {};
    11745  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
    11746 
    11747  // 1. Search existing allocations.
    11748  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
    11749  {
    11750  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    11751  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    11752  {
    11753  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    11754  VMA_ASSERT(pCurrBlock);
    11755  VmaAllocationRequest currRequest = {};
    11756  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
    11757  currentFrameIndex,
    11758  m_FrameInUseCount,
    11759  m_BufferImageGranularity,
    11760  size,
    11761  alignment,
    11762  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
    11763  suballocType,
    11764  canMakeOtherLost,
    11765  strategy,
    11766  &currRequest))
    11767  {
    11768  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    11769  if(pBestRequestBlock == VMA_NULL ||
    11770  currRequestCost < bestRequestCost)
    11771  {
    11772  pBestRequestBlock = pCurrBlock;
    11773  bestRequest = currRequest;
    11774  bestRequestCost = currRequestCost;
    11775 
    11776  if(bestRequestCost == 0)
    11777  {
    11778  break;
    11779  }
    11780  }
    11781  }
    11782  }
    11783  }
    11784  else // WORST_FIT, FIRST_FIT
    11785  {
    11786  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
    11787  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    11788  {
    11789  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    11790  VMA_ASSERT(pCurrBlock);
    11791  VmaAllocationRequest currRequest = {};
    11792  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
    11793  currentFrameIndex,
    11794  m_FrameInUseCount,
    11795  m_BufferImageGranularity,
    11796  size,
    11797  alignment,
    11798  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
    11799  suballocType,
    11800  canMakeOtherLost,
    11801  strategy,
    11802  &currRequest))
    11803  {
    11804  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    11805  if(pBestRequestBlock == VMA_NULL ||
    11806  currRequestCost < bestRequestCost ||
    11807  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
    11808  {
    11809  pBestRequestBlock = pCurrBlock;
    11810  bestRequest = currRequest;
    11811  bestRequestCost = currRequestCost;
    11812 
    11813  if(bestRequestCost == 0 ||
    11814  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
    11815  {
    11816  break;
    11817  }
    11818  }
    11819  }
    11820  }
    11821  }
    11822 
    11823  if(pBestRequestBlock != VMA_NULL)
    11824  {
    11825  if(mapped)
    11826  {
    11827  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
    11828  if(res != VK_SUCCESS)
    11829  {
    11830  return res;
    11831  }
    11832  }
    11833 
    11834  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
    11835  currentFrameIndex,
    11836  m_FrameInUseCount,
    11837  &bestRequest))
    11838  {
    11839  // We no longer have an empty block.
    11840  if(pBestRequestBlock->m_pMetadata->IsEmpty())
    11841  {
    11842  m_HasEmptyBlock = false;
    11843  }
    11844  // Allocate from this pBlock.
    11845  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate();
    11846  (*pAllocation)->Ctor(currentFrameIndex, isUserDataString);
    11847  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, *pAllocation);
    11848  (*pAllocation)->InitBlockAllocation(
    11849  pBestRequestBlock,
    11850  bestRequest.offset,
    11851  alignment,
    11852  size,
    11853  suballocType,
    11854  mapped,
    11855  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    11856  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
    11857  VMA_DEBUG_LOG(" Returned from existing block");
    11858  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
    11859  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    11860  {
    11861  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    11862  }
    11863  if(IsCorruptionDetectionEnabled())
    11864  {
    11865  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
    11866  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
    11867  }
    11868  return VK_SUCCESS;
    11869  }
    11870  // else: Some allocations must have been touched while we are here. Next try.
    11871  }
    11872  else
    11873  {
    11874  // Could not find place in any of the blocks - break outer loop.
    11875  break;
    11876  }
    11877  }
    11878  /* Maximum number of tries exceeded - a very unlikely event when many other
    11879  threads are simultaneously touching allocations, making it impossible to make
    11880  them lost at the same time as we try to allocate. */
    11881  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
    11882  {
    11883  return VK_ERROR_TOO_MANY_OBJECTS;
    11884  }
    11885  }
    11886 
    11887  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    11888 }
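// Summary of the strategy above: (1) try existing blocks (only the last one
// for the linear algorithm), (2) create a new block, retrying with halved
// sizes if the device memory allocation fails, (3) as a last resort, make
// other allocations lost, retrying up to VMA_ALLOCATION_TRY_COUNT times
// before giving up with VK_ERROR_TOO_MANY_OBJECTS.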
    11889 
    11890 void VmaBlockVector::Free(
    11891  VmaAllocation hAllocation)
    11892 {
    11893  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
    11894 
    11895  // Scope for lock.
    11896  {
    11897  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
    11898 
    11899  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    11900 
    11901  if(IsCorruptionDetectionEnabled())
    11902  {
    11903  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
    11904  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
    11905  }
    11906 
    11907  if(hAllocation->IsPersistentMap())
    11908  {
    11909  pBlock->Unmap(m_hAllocator, 1);
    11910  }
    11911 
    11912  pBlock->m_pMetadata->Free(hAllocation);
    11913  VMA_HEAVY_ASSERT(pBlock->Validate());
    11914 
    11915  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
    11916 
    11917  // pBlock became empty after this deallocation.
    11918  if(pBlock->m_pMetadata->IsEmpty())
    11919  {
    11920  // We already have an empty block - we don't want to have two, so delete this one.
    11921  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
    11922  {
    11923  pBlockToDelete = pBlock;
    11924  Remove(pBlock);
    11925  }
    11926  // We now have our first empty block.
    11927  else
    11928  {
    11929  m_HasEmptyBlock = true;
    11930  }
    11931  }
    11932  // pBlock didn't become empty, but we have another empty block - find and free that one.
    11933  // (This is optional, heuristics.)
    11934  else if(m_HasEmptyBlock)
    11935  {
    11936  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
    11937  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
    11938  {
    11939  pBlockToDelete = pLastBlock;
    11940  m_Blocks.pop_back();
    11941  m_HasEmptyBlock = false;
    11942  }
    11943  }
    11944 
    11945  IncrementallySortBlocks();
    11946  }
    11947 
    11948  // Destruction of a free block. Deferred until this point, outside of the mutex
    11949  // lock, for performance reasons.
    11950  if(pBlockToDelete != VMA_NULL)
    11951  {
    11952  VMA_DEBUG_LOG(" Deleted empty allocation");
    11953  pBlockToDelete->Destroy(m_hAllocator);
    11954  vma_delete(m_hAllocator, pBlockToDelete);
    11955  }
    11956 }
    11957 
    11958 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
    11959 {
    11960  VkDeviceSize result = 0;
    11961  for(size_t i = m_Blocks.size(); i--; )
    11962  {
    11963  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
    11964  if(result >= m_PreferredBlockSize)
    11965  {
    11966  break;
    11967  }
    11968  }
    11969  return result;
    11970 }
    11971 
    11972 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
    11973 {
    11974  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    11975  {
    11976  if(m_Blocks[blockIndex] == pBlock)
    11977  {
    11978  VmaVectorRemove(m_Blocks, blockIndex);
    11979  return;
    11980  }
    11981  }
    11982  VMA_ASSERT(0);
    11983 }
    11984 
    11985 void VmaBlockVector::IncrementallySortBlocks()
    11986 {
    11987  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    11988  {
    11989  // Bubble sort only until first swap.
    11990  for(size_t i = 1; i < m_Blocks.size(); ++i)
    11991  {
    11992  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
    11993  {
    11994  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
    11995  return;
    11996  }
    11997  }
    11998  }
    11999 }
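// A single bubble-sort pass (stopping at the first swap) per call keeps
// m_Blocks approximately sorted by increasing free space, so the forward
// search in AllocatePage() prefers the fullest blocks and packs memory
// tighter; the cost of sorting is amortized across many calls. The linear
// algorithm is excluded because it must always allocate from the last block.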
    12000 
    12001 VkResult VmaBlockVector::AllocateFromBlock(
    12002  VmaDeviceMemoryBlock* pBlock,
    12003  uint32_t currentFrameIndex,
    12004  VkDeviceSize size,
    12005  VkDeviceSize alignment,
    12006  VmaAllocationCreateFlags allocFlags,
    12007  void* pUserData,
    12008  VmaSuballocationType suballocType,
    12009  uint32_t strategy,
    12010  VmaAllocation* pAllocation)
    12011 {
    12012  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
    12013  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    12014  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    12015  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
    12016 
    12017  VmaAllocationRequest currRequest = {};
    12018  if(pBlock->m_pMetadata->CreateAllocationRequest(
    12019  currentFrameIndex,
    12020  m_FrameInUseCount,
    12021  m_BufferImageGranularity,
    12022  size,
    12023  alignment,
    12024  isUpperAddress,
    12025  suballocType,
    12026  false, // canMakeOtherLost
    12027  strategy,
    12028  &currRequest))
    12029  {
    12030  // Allocate from pCurrBlock.
    12031  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
    12032 
    12033  if(mapped)
    12034  {
    12035  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
    12036  if(res != VK_SUCCESS)
    12037  {
    12038  return res;
    12039  }
    12040  }
    12041 
    12042  // We no longer have an empty block.
    12043  if(pBlock->m_pMetadata->IsEmpty())
    12044  {
    12045  m_HasEmptyBlock = false;
    12046  }
    12047 
    12048  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate();
    12049  (*pAllocation)->Ctor(currentFrameIndex, isUserDataString);
    12050  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, *pAllocation);
    12051  (*pAllocation)->InitBlockAllocation(
    12052  pBlock,
    12053  currRequest.offset,
    12054  alignment,
    12055  size,
    12056  suballocType,
    12057  mapped,
    12058  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    12059  VMA_HEAVY_ASSERT(pBlock->Validate());
    12060  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
    12061  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    12062  {
    12063  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    12064  }
    12065  if(IsCorruptionDetectionEnabled())
    12066  {
    12067  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
    12068  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
    12069  }
    12070  return VK_SUCCESS;
    12071  }
    12072  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12073 }
    12074 
    12075 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
    12076 {
    12077  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    12078  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
    12079  allocInfo.allocationSize = blockSize;
    12080  VkDeviceMemory mem = VK_NULL_HANDLE;
    12081  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
    12082  if(res < 0)
    12083  {
    12084  return res;
    12085  }
    12086 
    12087  // New VkDeviceMemory successfully created.
    12088 
    12089  // Create a new block object for it.
    12090  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
    12091  pBlock->Init(
    12092  m_hAllocator,
    12093  m_hParentPool,
    12094  m_MemoryTypeIndex,
    12095  mem,
    12096  allocInfo.allocationSize,
    12097  m_NextBlockId++,
    12098  m_Algorithm);
    12099 
    12100  m_Blocks.push_back(pBlock);
    12101  if(pNewBlockIndex != VMA_NULL)
    12102  {
    12103  *pNewBlockIndex = m_Blocks.size() - 1;
    12104  }
    12105 
    12106  return VK_SUCCESS;
    12107 }
    12108 
    12109 void VmaBlockVector::ApplyDefragmentationMovesCpu(
    12110  class VmaBlockVectorDefragmentationContext* pDefragCtx,
    12111  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves)
    12112 {
    12113  const size_t blockCount = m_Blocks.size();
    12114  const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex);
    12115 
    12116  enum BLOCK_FLAG
    12117  {
    12118  BLOCK_FLAG_USED = 0x00000001,
    12119  BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002,
    12120  };
    12121 
    12122  struct BlockInfo
    12123  {
    12124  uint32_t flags;
    12125  void* pMappedData;
    12126  };
    12127  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> >
    12128  blockInfo(blockCount, VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks()));
    12129  memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo));
    12130 
    12131  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
    12132  const size_t moveCount = moves.size();
    12133  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
    12134  {
    12135  const VmaDefragmentationMove& move = moves[moveIndex];
    12136  blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED;
    12137  blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED;
    12138  }
    12139 
    12140  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
    12141 
    12142  // Go over all blocks. Get mapped pointer or map if necessary.
    12143  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
    12144  {
    12145  BlockInfo& currBlockInfo = blockInfo[blockIndex];
    12146  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
    12147  if((currBlockInfo.flags & BLOCK_FLAG_USED) != 0)
    12148  {
    12149  currBlockInfo.pMappedData = pBlock->GetMappedData();
    12150  // If it was not mapped by the user, map it just for defragmentation.
    12151  if(currBlockInfo.pMappedData == VMA_NULL)
    12152  {
    12153  pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData);
    12154  if(pDefragCtx->res == VK_SUCCESS)
    12155  {
    12156  currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION;
    12157  }
    12158  }
    12159  }
    12160  }
    12161 
    12162  // Go over all moves. Do actual data transfer.
    12163  if(pDefragCtx->res == VK_SUCCESS)
    12164  {
    12165  const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
    12166  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
    12167 
    12168  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
    12169  {
    12170  const VmaDefragmentationMove& move = moves[moveIndex];
    12171 
    12172  const BlockInfo& srcBlockInfo = blockInfo[move.srcBlockIndex];
    12173  const BlockInfo& dstBlockInfo = blockInfo[move.dstBlockIndex];
    12174 
    12175  VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData);
    12176 
    12177  // Invalidate source.
    12178  if(isNonCoherent)
    12179  {
    12180  VmaDeviceMemoryBlock* const pSrcBlock = m_Blocks[move.srcBlockIndex];
    12181  memRange.memory = pSrcBlock->GetDeviceMemory();
    12182  memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize);
    12183  memRange.size = VMA_MIN(
    12184  VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize),
    12185  pSrcBlock->m_pMetadata->GetSize() - memRange.offset);
    12186  (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
    12187  }
    12188 
    12189  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
    12190  memmove(
    12191  reinterpret_cast<char*>(dstBlockInfo.pMappedData) + move.dstOffset,
    12192  reinterpret_cast<char*>(srcBlockInfo.pMappedData) + move.srcOffset,
    12193  static_cast<size_t>(move.size));
    12194 
    12195  if(IsCorruptionDetectionEnabled())
    12196  {
    12197  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset - VMA_DEBUG_MARGIN);
    12198  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size);
    12199  }
    12200 
    12201  // Flush destination.
    12202  if(isNonCoherent)
    12203  {
    12204  VmaDeviceMemoryBlock* const pDstBlock = m_Blocks[move.dstBlockIndex];
    12205  memRange.memory = pDstBlock->GetDeviceMemory();
    12206  memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize);
    12207  memRange.size = VMA_MIN(
    12208  VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize),
    12209  pDstBlock->m_pMetadata->GetSize() - memRange.offset);
    12210  (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
    12211  }
    12212  }
    12213  }
    12214 
    12215  // Go over all blocks in reverse order. Unmap those that were mapped just for defragmentation.
    12216  // This is done regardless of whether pDefragCtx->res == VK_SUCCESS.
    12217  for(size_t blockIndex = blockCount; blockIndex--; )
    12218  {
    12219  const BlockInfo& currBlockInfo = blockInfo[blockIndex];
    12220  if((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0)
    12221  {
    12222  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
    12223  pBlock->Unmap(m_hAllocator, 1);
    12224  }
    12225  }
    12226 }
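/*
Summary of the CPU path above: the whole transfer happens on the host.
Each block touched by a move is mapped (or its existing mapping is
reused), the source range is invalidated on non-coherent memory types,
memmove() performs the copy (source and destination may overlap), the
destination range is flushed, and finally the blocks that were mapped
only for defragmentation are unmapped again in reverse order.
*/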
    12227 
    12228 void VmaBlockVector::ApplyDefragmentationMovesGpu(
    12229  class VmaBlockVectorDefragmentationContext* pDefragCtx,
    12230  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    12231  VkCommandBuffer commandBuffer)
    12232 {
    12233  const size_t blockCount = m_Blocks.size();
    12234 
    12235  pDefragCtx->blockContexts.resize(blockCount);
    12236  memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext));
    12237 
    12238  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
    12239  const size_t moveCount = moves.size();
    12240  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
    12241  {
    12242  const VmaDefragmentationMove& move = moves[moveIndex];
    12243  pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
    12244  pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
    12245  }
    12246 
    12247  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
    12248 
    12249  // Go over all blocks. Create and bind a buffer for the whole block if necessary.
    12250  {
    12251  VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    12252  bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT |
    12253  VK_BUFFER_USAGE_TRANSFER_DST_BIT;
    12254 
    12255  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
    12256  {
    12257  VmaBlockDefragmentationContext& currBlockCtx = pDefragCtx->blockContexts[blockIndex];
    12258  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
    12259  if((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0)
    12260  {
    12261  bufCreateInfo.size = pBlock->m_pMetadata->GetSize();
    12262  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)(
    12263  m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer);
    12264  if(pDefragCtx->res == VK_SUCCESS)
    12265  {
    12266  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)(
    12267  m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0);
    12268  }
    12269  }
    12270  }
    12271  }
    12272 
    12273  // Go over all moves. Post data transfer commands to command buffer.
    12274  if(pDefragCtx->res == VK_SUCCESS)
    12275  {
    12276  const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
    12277  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
    12278 
    12279  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
    12280  {
    12281  const VmaDefragmentationMove& move = moves[moveIndex];
    12282 
    12283  const VmaBlockDefragmentationContext& srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex];
    12284  const VmaBlockDefragmentationContext& dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex];
    12285 
    12286  VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer);
    12287 
    12288  VkBufferCopy region = {
    12289  move.srcOffset,
    12290  move.dstOffset,
    12291  move.size };
    12292  (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)(
    12293  commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, &region);
    12294  }
    12295  }
    12296 
    12297  // Buffers stay in pDefragCtx->blockContexts for later destruction; mark the context as not ready until the command buffer executes.
    12298  if(pDefragCtx->res == VK_SUCCESS && moveCount > 0)
    12299  {
    12300  pDefragCtx->res = VK_NOT_READY;
    12301  }
    12302 }
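/*
Summary of the GPU path above: a VkBuffer spanning the whole block is
created and bound for every block touched by a move, one vkCmdCopyBuffer
region per move is recorded into the caller's command buffer, and the
context result is set to VK_NOT_READY because the copies take effect only
after that command buffer has been submitted and has finished execution.
*/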
    12303 
    12304 void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats)
    12305 {
    12306  m_HasEmptyBlock = false;
    12307  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    12308  {
    12309  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
    12310  if(pBlock->m_pMetadata->IsEmpty())
    12311  {
    12312  if(m_Blocks.size() > m_MinBlockCount)
    12313  {
    12314  if(pDefragmentationStats != VMA_NULL)
    12315  {
    12316  ++pDefragmentationStats->deviceMemoryBlocksFreed;
    12317  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
    12318  }
    12319 
    12320  VmaVectorRemove(m_Blocks, blockIndex);
    12321  pBlock->Destroy(m_hAllocator);
    12322  vma_delete(m_hAllocator, pBlock);
    12323  }
    12324  else
    12325  {
    12326  m_HasEmptyBlock = true;
    12327  }
    12328  }
    12329  }
    12330 }
    12331 
    12332 #if VMA_STATS_STRING_ENABLED
    12333 
    12334 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
    12335 {
    12336  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
    12337 
    12338  json.BeginObject();
    12339 
    12340  if(m_IsCustomPool)
    12341  {
    12342  json.WriteString("MemoryTypeIndex");
    12343  json.WriteNumber(m_MemoryTypeIndex);
    12344 
    12345  json.WriteString("BlockSize");
    12346  json.WriteNumber(m_PreferredBlockSize);
    12347 
    12348  json.WriteString("BlockCount");
    12349  json.BeginObject(true);
    12350  if(m_MinBlockCount > 0)
    12351  {
    12352  json.WriteString("Min");
    12353  json.WriteNumber((uint64_t)m_MinBlockCount);
    12354  }
    12355  if(m_MaxBlockCount < SIZE_MAX)
    12356  {
    12357  json.WriteString("Max");
    12358  json.WriteNumber((uint64_t)m_MaxBlockCount);
    12359  }
    12360  json.WriteString("Cur");
    12361  json.WriteNumber((uint64_t)m_Blocks.size());
    12362  json.EndObject();
    12363 
    12364  if(m_FrameInUseCount > 0)
    12365  {
    12366  json.WriteString("FrameInUseCount");
    12367  json.WriteNumber(m_FrameInUseCount);
    12368  }
    12369 
    12370  if(m_Algorithm != 0)
    12371  {
    12372  json.WriteString("Algorithm");
    12373  json.WriteString(VmaAlgorithmToStr(m_Algorithm));
    12374  }
    12375  }
    12376  else
    12377  {
    12378  json.WriteString("PreferredBlockSize");
    12379  json.WriteNumber(m_PreferredBlockSize);
    12380  }
    12381 
    12382  json.WriteString("Blocks");
    12383  json.BeginObject();
    12384  for(size_t i = 0; i < m_Blocks.size(); ++i)
    12385  {
    12386  json.BeginString();
    12387  json.ContinueString(m_Blocks[i]->GetId());
    12388  json.EndString();
    12389 
    12390  m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
    12391  }
    12392  json.EndObject();
    12393 
    12394  json.EndObject();
    12395 }
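/*
Approximate shape of the JSON emitted above for a custom pool (a sketch
with illustrative values; optional keys such as FrameInUseCount and
Algorithm appear only when non-default):

{
    "MemoryTypeIndex": 1,
    "BlockSize": 268435456,
    "BlockCount": { "Min": 1, "Max": 16, "Cur": 2 },
    "Blocks": { "0": { ... }, "1": { ... } }
}
*/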
    12396 
    12397 #endif // #if VMA_STATS_STRING_ENABLED
    12398 
    12399 void VmaBlockVector::Defragment(
    12400  class VmaBlockVectorDefragmentationContext* pCtx,
    12401  VmaDefragmentationStats* pStats,
    12402  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
    12403  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
    12404  VkCommandBuffer commandBuffer)
    12405 {
    12406  pCtx->res = VK_SUCCESS;
    12407 
    12408  const VkMemoryPropertyFlags memPropFlags =
    12409  m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags;
    12410  const bool isHostVisible = (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
    12411  const bool isHostCoherent = (memPropFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0;
    12412 
    12413  const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 &&
    12414  isHostVisible;
    12415  const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0 &&
    12416  !IsCorruptionDetectionEnabled();
    12417 
    12418  // There are options to defragment this memory type.
    12419  if(canDefragmentOnCpu || canDefragmentOnGpu)
    12420  {
    12421  bool defragmentOnGpu;
    12422  // There is only one option to defragment this memory type.
    12423  if(canDefragmentOnGpu != canDefragmentOnCpu)
    12424  {
    12425  defragmentOnGpu = canDefragmentOnGpu;
    12426  }
    12427  // Both options are available: use a heuristic to choose the better one.
    12428  else
    12429  {
    12430  defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 ||
    12431  m_hAllocator->IsIntegratedGpu();
    12432  }
    12433 
    12434  bool overlappingMoveSupported = !defragmentOnGpu;
    12435 
    12436  if(m_hAllocator->m_UseMutex)
    12437  {
    12438  m_Mutex.LockWrite();
    12439  pCtx->mutexLocked = true;
    12440  }
    12441 
    12442  pCtx->Begin(overlappingMoveSupported);
    12443 
    12444  // Defragment.
    12445 
    12446  const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove;
    12447  const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove;
    12448  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > moves =
    12449  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >(VmaStlAllocator<VmaDefragmentationMove>(m_hAllocator->GetAllocationCallbacks()));
    12450  pCtx->res = pCtx->GetAlgorithm()->Defragment(moves, maxBytesToMove, maxAllocationsToMove);
    12451 
    12452  // Accumulate statistics.
    12453  if(pStats != VMA_NULL)
    12454  {
    12455  const VkDeviceSize bytesMoved = pCtx->GetAlgorithm()->GetBytesMoved();
    12456  const uint32_t allocationsMoved = pCtx->GetAlgorithm()->GetAllocationsMoved();
    12457  pStats->bytesMoved += bytesMoved;
    12458  pStats->allocationsMoved += allocationsMoved;
    12459  VMA_ASSERT(bytesMoved <= maxBytesToMove);
    12460  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
    12461  if(defragmentOnGpu)
    12462  {
    12463  maxGpuBytesToMove -= bytesMoved;
    12464  maxGpuAllocationsToMove -= allocationsMoved;
    12465  }
    12466  else
    12467  {
    12468  maxCpuBytesToMove -= bytesMoved;
    12469  maxCpuAllocationsToMove -= allocationsMoved;
    12470  }
    12471  }
    12472 
    12473  if(pCtx->res >= VK_SUCCESS)
    12474  {
    12475  if(defragmentOnGpu)
    12476  {
    12477  ApplyDefragmentationMovesGpu(pCtx, moves, commandBuffer);
    12478  }
    12479  else
    12480  {
    12481  ApplyDefragmentationMovesCpu(pCtx, moves);
    12482  }
    12483  }
    12484  }
    12485 }
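/*
A minimal usage sketch of the public API that ultimately reaches this
function through VmaDefragmentationContext_T::Defragment() (illustrative
only; 'allocator', 'allocs' and 'allocCount' are assumed to exist in the
caller's code):

    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = allocCount;
    defragInfo.pAllocations = allocs;
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX;
    // commandBuffer stays VK_NULL_HANDLE, so only the CPU path can run.

    VmaDefragmentationContext defragCtx = VMA_NULL;
    VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, VMA_NULL, &defragCtx);
    if(res >= VK_SUCCESS)
    {
        vmaDefragmentationEnd(allocator, defragCtx);
    }
*/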
    12486 
    12487 void VmaBlockVector::DefragmentationEnd(
    12488  class VmaBlockVectorDefragmentationContext* pCtx,
    12489  VmaDefragmentationStats* pStats)
    12490 {
    12491  // Destroy buffers.
    12492  for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--; )
    12493  {
    12494  VmaBlockDefragmentationContext& blockCtx = pCtx->blockContexts[blockIndex];
    12495  if(blockCtx.hBuffer)
    12496  {
    12497  (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(
    12498  m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
    12499  }
    12500  }
    12501 
    12502  if(pCtx->res >= VK_SUCCESS)
    12503  {
    12504  FreeEmptyBlocks(pStats);
    12505  }
    12506 
    12507  if(pCtx->mutexLocked)
    12508  {
    12509  VMA_ASSERT(m_hAllocator->m_UseMutex);
    12510  m_Mutex.UnlockWrite();
    12511  }
    12512 }
    12513 
    12514 size_t VmaBlockVector::CalcAllocationCount() const
    12515 {
    12516  size_t result = 0;
    12517  for(size_t i = 0; i < m_Blocks.size(); ++i)
    12518  {
    12519  result += m_Blocks[i]->m_pMetadata->GetAllocationCount();
    12520  }
    12521  return result;
    12522 }
    12523 
    12524 bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const
    12525 {
    12526  if(m_BufferImageGranularity == 1)
    12527  {
    12528  return false;
    12529  }
    12530  VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE;
    12531  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
    12532  {
    12533  VmaDeviceMemoryBlock* const pBlock = m_Blocks[i];
    12534  VMA_ASSERT(m_Algorithm == 0);
    12535  VmaBlockMetadata_Generic* const pMetadata = (VmaBlockMetadata_Generic*)pBlock->m_pMetadata;
    12536  if(pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType))
    12537  {
    12538  return true;
    12539  }
    12540  }
    12541  return false;
    12542 }
    12543 
    12544 void VmaBlockVector::MakePoolAllocationsLost(
    12545  uint32_t currentFrameIndex,
    12546  size_t* pLostAllocationCount)
    12547 {
    12548  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
    12549  size_t lostAllocationCount = 0;
    12550  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    12551  {
    12552  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    12553  VMA_ASSERT(pBlock);
    12554  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
    12555  }
    12556  if(pLostAllocationCount != VMA_NULL)
    12557  {
    12558  *pLostAllocationCount = lostAllocationCount;
    12559  }
    12560 }
    12561 
    12562 VkResult VmaBlockVector::CheckCorruption()
    12563 {
    12564  if(!IsCorruptionDetectionEnabled())
    12565  {
    12566  return VK_ERROR_FEATURE_NOT_PRESENT;
    12567  }
    12568 
    12569  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
    12570  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    12571  {
    12572  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    12573  VMA_ASSERT(pBlock);
    12574  VkResult res = pBlock->CheckCorruption(m_hAllocator);
    12575  if(res != VK_SUCCESS)
    12576  {
    12577  return res;
    12578  }
    12579  }
    12580  return VK_SUCCESS;
    12581 }
    12582 
    12583 void VmaBlockVector::AddStats(VmaStats* pStats)
    12584 {
    12585  const uint32_t memTypeIndex = m_MemoryTypeIndex;
    12586  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
    12587 
    12588  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
    12589 
    12590  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    12591  {
    12592  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    12593  VMA_ASSERT(pBlock);
    12594  VMA_HEAVY_ASSERT(pBlock->Validate());
    12595  VmaStatInfo allocationStatInfo;
    12596  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
    12597  VmaAddStatInfo(pStats->total, allocationStatInfo);
    12598  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    12599  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    12600  }
    12601 }
    12602 
    12604 // VmaDefragmentationAlgorithm_Generic members definition
    12605 
    12606 VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic(
    12607  VmaAllocator hAllocator,
    12608  VmaBlockVector* pBlockVector,
    12609  uint32_t currentFrameIndex,
    12610  bool overlappingMoveSupported) :
    12611  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
    12612  m_AllAllocations(false),
    12613  m_AllocationCount(0),
    12614  m_BytesMoved(0),
    12615  m_AllocationsMoved(0),
    12616  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
    12617 {
    12618  // Create block info for each block.
    12619  const size_t blockCount = m_pBlockVector->m_Blocks.size();
    12620  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    12621  {
    12622  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
    12623  pBlockInfo->m_OriginalBlockIndex = blockIndex;
    12624  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
    12625  m_Blocks.push_back(pBlockInfo);
    12626  }
    12627 
    12628  // Sort them by m_pBlock pointer value.
    12629  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
    12630 }
    12631 
    12632 VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic()
    12633 {
    12634  for(size_t i = m_Blocks.size(); i--; )
    12635  {
    12636  vma_delete(m_hAllocator, m_Blocks[i]);
    12637  }
    12638 }
    12639 
    12640 void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
    12641 {
    12642  // Now that we hold VmaBlockVector::m_Mutex, we can make a final check whether this allocation was lost.
    12643  if(hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
    12644  {
    12645  VmaDeviceMemoryBlock* pBlock = hAlloc->GetBlock();
    12646  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
    12647  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
    12648  {
    12649  AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged);
    12650  (*it)->m_Allocations.push_back(allocInfo);
    12651  }
    12652  else
    12653  {
    12654  VMA_ASSERT(0);
    12655  }
    12656 
    12657  ++m_AllocationCount;
    12658  }
    12659 }
    12660 
    12661 VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
    12662  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    12663  VkDeviceSize maxBytesToMove,
    12664  uint32_t maxAllocationsToMove)
    12665 {
    12666  if(m_Blocks.empty())
    12667  {
    12668  return VK_SUCCESS;
    12669  }
    12670 
    12671  // This is a choice based on research.
    12672  // Option 1:
    12673  uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
    12674  // Option 2:
    12675  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT;
    12676  // Option 3:
    12677  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT;
    12678 
    12679  size_t srcBlockMinIndex = 0;
    12680  // With FAST_ALGORITHM, move allocations only from the last of the blocks that contain non-movable allocations.
    12681  /*
    12682  if(m_AlgorithmFlags & VMA_DEFRAGMENTATION_FAST_ALGORITHM_BIT)
    12683  {
    12684  const size_t blocksWithNonMovableCount = CalcBlocksWithNonMovableCount();
    12685  if(blocksWithNonMovableCount > 0)
    12686  {
    12687  srcBlockMinIndex = blocksWithNonMovableCount - 1;
    12688  }
    12689  }
    12690  */
    12691 
    12692  size_t srcBlockIndex = m_Blocks.size() - 1;
    12693  size_t srcAllocIndex = SIZE_MAX;
    12694  for(;;)
    12695  {
    12696  // 1. Find the next allocation to move.
    12697  // 1.1. Iterate m_Blocks from last to first - they are sorted from most "destination" to most "source".
    12698  // 1.2. Within a block, iterate m_Allocations from last to first.
    12699  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
    12700  {
    12701  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
    12702  {
    12703  // Finished: no more allocations to process.
    12704  if(srcBlockIndex == srcBlockMinIndex)
    12705  {
    12706  return VK_SUCCESS;
    12707  }
    12708  else
    12709  {
    12710  --srcBlockIndex;
    12711  srcAllocIndex = SIZE_MAX;
    12712  }
    12713  }
    12714  else
    12715  {
    12716  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
    12717  }
    12718  }
    12719 
    12720  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
    12721  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
    12722 
    12723  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
    12724  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
    12725  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
    12726  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
    12727 
    12728  // 2. Try to find a new place for this allocation in a preceding block or the current one.
    12729  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
    12730  {
    12731  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
    12732  VmaAllocationRequest dstAllocRequest;
    12733  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
    12734  m_CurrentFrameIndex,
    12735  m_pBlockVector->GetFrameInUseCount(),
    12736  m_pBlockVector->GetBufferImageGranularity(),
    12737  size,
    12738  alignment,
    12739  false, // upperAddress
    12740  suballocType,
    12741  false, // canMakeOtherLost
    12742  strategy,
    12743  &dstAllocRequest) &&
    12744  MoveMakesSense(
    12745  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
    12746  {
    12747  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
    12748 
    12749  // Reached the limit on the number of allocations or bytes to move.
    12750  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
    12751  (m_BytesMoved + size > maxBytesToMove))
    12752  {
    12753  return VK_SUCCESS;
    12754  }
    12755 
    12756  VmaDefragmentationMove move;
    12757  move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex;
    12758  move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex;
    12759  move.srcOffset = srcOffset;
    12760  move.dstOffset = dstAllocRequest.offset;
    12761  move.size = size;
    12762  moves.push_back(move);
    12763 
    12764  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
    12765  dstAllocRequest,
    12766  suballocType,
    12767  size,
    12768  allocInfo.m_hAllocation);
    12769  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
    12770 
    12771  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
    12772 
    12773  if(allocInfo.m_pChanged != VMA_NULL)
    12774  {
    12775  *allocInfo.m_pChanged = VK_TRUE;
    12776  }
    12777 
    12778  ++m_AllocationsMoved;
    12779  m_BytesMoved += size;
    12780 
    12781  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
    12782 
    12783  break;
    12784  }
    12785  }
    12786 
    12787  // If not processed, this allocInfo remains in pSrcBlockInfo->m_Allocations for the next round.
    12788 
    12789  if(srcAllocIndex > 0)
    12790  {
    12791  --srcAllocIndex;
    12792  }
    12793  else
    12794  {
    12795  if(srcBlockIndex > 0)
    12796  {
    12797  --srcBlockIndex;
    12798  srcAllocIndex = SIZE_MAX;
    12799  }
    12800  else
    12801  {
    12802  return VK_SUCCESS;
    12803  }
    12804  }
    12805  }
    12806 }
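/*
Each round above walks allocations starting from the most "source" block
and, for every allocation, scans the current and all preceding blocks for
a better spot via CreateAllocationRequest() + MoveMakesSense(); the round
returns as soon as the byte or allocation budget would be exceeded.
*/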
    12807 
    12808 size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const
    12809 {
    12810  size_t result = 0;
    12811  for(size_t i = 0; i < m_Blocks.size(); ++i)
    12812  {
    12813  if(m_Blocks[i]->m_HasNonMovableAllocations)
    12814  {
    12815  ++result;
    12816  }
    12817  }
    12818  return result;
    12819 }
    12820 
    12821 VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
    12822  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    12823  VkDeviceSize maxBytesToMove,
    12824  uint32_t maxAllocationsToMove)
    12825 {
    12826  if(!m_AllAllocations && m_AllocationCount == 0)
    12827  {
    12828  return VK_SUCCESS;
    12829  }
    12830 
    12831  const size_t blockCount = m_Blocks.size();
    12832  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    12833  {
    12834  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
    12835 
    12836  if(m_AllAllocations)
    12837  {
    12838  VmaBlockMetadata_Generic* pMetadata = (VmaBlockMetadata_Generic*)pBlockInfo->m_pBlock->m_pMetadata;
    12839  for(VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin();
    12840  it != pMetadata->m_Suballocations.end();
    12841  ++it)
    12842  {
    12843  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
    12844  {
    12845  AllocationInfo allocInfo = AllocationInfo(it->hAllocation, VMA_NULL);
    12846  pBlockInfo->m_Allocations.push_back(allocInfo);
    12847  }
    12848  }
    12849  }
    12850 
    12851  pBlockInfo->CalcHasNonMovableAllocations();
    12852 
    12853  // This is a choice based on research.
    12854  // Option 1:
    12855  pBlockInfo->SortAllocationsByOffsetDescending();
    12856  // Option 2:
    12857  //pBlockInfo->SortAllocationsBySizeDescending();
    12858  }
    12859 
    12860  // Sort m_Blocks this time by the main criterion: from most "destination" to most "source" blocks.
    12861  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
    12862 
    12863  // This is a choice based on research.
    12864  const uint32_t roundCount = 2;
    12865 
    12866  // Execute defragmentation rounds (the main part).
    12867  VkResult result = VK_SUCCESS;
    12868  for(uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round)
    12869  {
    12870  result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove);
    12871  }
    12872 
    12873  return result;
    12874 }
    12875 
    12876 bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense(
    12877  size_t dstBlockIndex, VkDeviceSize dstOffset,
    12878  size_t srcBlockIndex, VkDeviceSize srcOffset)
    12879 {
    12880  if(dstBlockIndex < srcBlockIndex)
    12881  {
    12882  return true;
    12883  }
    12884  if(dstBlockIndex > srcBlockIndex)
    12885  {
    12886  return false;
    12887  }
    12888  if(dstOffset < srcOffset)
    12889  {
    12890  return true;
    12891  }
    12892  return false;
    12893 }
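/*
In effect, MoveMakesSense() compares positions lexicographically by
(blockIndex, offset): a move is worthwhile only if it lands in an earlier
block, or at a lower offset within the same block.
*/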
    12894 
    12896 // VmaDefragmentationAlgorithm_Fast
    12897 
    12898 VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast(
    12899  VmaAllocator hAllocator,
    12900  VmaBlockVector* pBlockVector,
    12901  uint32_t currentFrameIndex,
    12902  bool overlappingMoveSupported) :
    12903  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
    12904  m_OverlappingMoveSupported(overlappingMoveSupported),
    12905  m_AllocationCount(0),
    12906  m_AllAllocations(false),
    12907  m_BytesMoved(0),
    12908  m_AllocationsMoved(0),
    12909  m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks()))
    12910 {
    12911  VMA_ASSERT(VMA_DEBUG_MARGIN == 0);
    12912 
    12913 }
    12914 
    12915 VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast()
    12916 {
    12917 }
    12918 
    12919 VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
    12920  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    12921  VkDeviceSize maxBytesToMove,
    12922  uint32_t maxAllocationsToMove)
    12923 {
    12924  VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount);
    12925 
    12926  const size_t blockCount = m_pBlockVector->GetBlockCount();
    12927  if(blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0)
    12928  {
    12929  return VK_SUCCESS;
    12930  }
    12931 
    12932  PreprocessMetadata();
    12933 
    12934  // Sort blocks in order from most "destination" to most "source" (ascending sum of free size).
    12935 
    12936  m_BlockInfos.resize(blockCount);
    12937  for(size_t i = 0; i < blockCount; ++i)
    12938  {
    12939  m_BlockInfos[i].origBlockIndex = i;
    12940  }
    12941 
    12942  VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo& lhs, const BlockInfo& rhs) -> bool {
    12943  return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() <
    12944  m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize();
    12945  });
    12946 
    12947  // THE MAIN ALGORITHM
    12948 
    12949  FreeSpaceDatabase freeSpaceDb;
    12950 
    12951  size_t dstBlockInfoIndex = 0;
    12952  size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
    12953  VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
    12954  VmaBlockMetadata_Generic* pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
    12955  VkDeviceSize dstBlockSize = pDstMetadata->GetSize();
    12956  VkDeviceSize dstOffset = 0;
    12957 
    12958  bool end = false;
    12959  for(size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex)
    12960  {
    12961  const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex;
    12962  VmaDeviceMemoryBlock* const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex);
    12963  VmaBlockMetadata_Generic* const pSrcMetadata = (VmaBlockMetadata_Generic*)pSrcBlock->m_pMetadata;
    12964  for(VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin();
    12965  !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); )
    12966  {
    12967  VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation;
    12968  const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment();
    12969  const VkDeviceSize srcAllocSize = srcSuballocIt->size;
    12970  if(m_AllocationsMoved == maxAllocationsToMove ||
    12971  m_BytesMoved + srcAllocSize > maxBytesToMove)
    12972  {
    12973  end = true;
    12974  break;
    12975  }
    12976  const VkDeviceSize srcAllocOffset = srcSuballocIt->offset;
    12977 
    12978  // Try to place it in one of the free spaces from the database.
    12979  size_t freeSpaceInfoIndex;
    12980  VkDeviceSize dstAllocOffset;
    12981  if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize,
    12982  freeSpaceInfoIndex, dstAllocOffset))
    12983  {
    12984  size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
    12985  VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
    12986  VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata;
    12987 
    12988  // Same block
    12989  if(freeSpaceInfoIndex == srcBlockInfoIndex)
    12990  {
    12991  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
    12992 
    12993  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
    12994 
    12995  VmaSuballocation suballoc = *srcSuballocIt;
    12996  suballoc.offset = dstAllocOffset;
    12997  suballoc.hAllocation->ChangeOffset(dstAllocOffset);
    12998  m_BytesMoved += srcAllocSize;
    12999  ++m_AllocationsMoved;
    13000 
    13001  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
    13002  ++nextSuballocIt;
    13003  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
    13004  srcSuballocIt = nextSuballocIt;
    13005 
    13006  InsertSuballoc(pFreeSpaceMetadata, suballoc);
    13007 
    13008  VmaDefragmentationMove move = {
    13009  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
    13010  srcAllocOffset, dstAllocOffset,
    13011  srcAllocSize };
    13012  moves.push_back(move);
    13013  }
    13014  // Different block
    13015  else
    13016  {
    13017  // MOVE OPTION 2: Move the allocation to a different block.
    13018 
    13019  VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex);
    13020 
    13021  VmaSuballocation suballoc = *srcSuballocIt;
    13022  suballoc.offset = dstAllocOffset;
    13023  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset);
    13024  m_BytesMoved += srcAllocSize;
    13025  ++m_AllocationsMoved;
    13026 
    13027  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
    13028  ++nextSuballocIt;
    13029  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
    13030  srcSuballocIt = nextSuballocIt;
    13031 
    13032  InsertSuballoc(pFreeSpaceMetadata, suballoc);
    13033 
    13034  VmaDefragmentationMove move = {
    13035  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
    13036  srcAllocOffset, dstAllocOffset,
    13037  srcAllocSize };
    13038  moves.push_back(move);
    13039  }
    13040  }
    13041  else
    13042  {
    13043  dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment);
    13044 
    13045  // If the allocation doesn't fit before the end of dstBlock, advance to the next block.
    13046  while(dstBlockInfoIndex < srcBlockInfoIndex &&
    13047  dstAllocOffset + srcAllocSize > dstBlockSize)
    13048  {
    13049  // But before that, register the remaining free space at the end of the destination block.
    13050  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset);
    13051 
    13052  ++dstBlockInfoIndex;
    13053  dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
    13054  pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
    13055  pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
    13056  dstBlockSize = pDstMetadata->GetSize();
    13057  dstOffset = 0;
    13058  dstAllocOffset = 0;
    13059  }
    13060 
    13061  // Same block
    13062  if(dstBlockInfoIndex == srcBlockInfoIndex)
    13063  {
    13064  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
    13065 
    13066  const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset;
    13067 
    13068  bool skipOver = overlap;
    13069  if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset)
    13070  {
    13071  // If the destination and source regions overlap, skip the move
    13072  // if it would shift the allocation by less than 1/64 of its size.
    13073  skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize;
    13074  }
    13075 
    13076  if(skipOver)
    13077  {
    13078  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset);
    13079 
    13080  dstOffset = srcAllocOffset + srcAllocSize;
    13081  ++srcSuballocIt;
    13082  }
    13083  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
    13084  else
    13085  {
    13086  srcSuballocIt->offset = dstAllocOffset;
    13087  srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset);
    13088  dstOffset = dstAllocOffset + srcAllocSize;
    13089  m_BytesMoved += srcAllocSize;
    13090  ++m_AllocationsMoved;
    13091  ++srcSuballocIt;
    13092  VmaDefragmentationMove move = {
    13093  srcOrigBlockIndex, dstOrigBlockIndex,
    13094  srcAllocOffset, dstAllocOffset,
    13095  srcAllocSize };
    13096  moves.push_back(move);
    13097  }
    13098  }
    13099  // Different block
    13100  else
    13101  {
    13102  // MOVE OPTION 2: Move the allocation to a different block.
    13103 
    13104  VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex);
    13105  VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize);
    13106 
    13107  VmaSuballocation suballoc = *srcSuballocIt;
    13108  suballoc.offset = dstAllocOffset;
    13109  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset);
    13110  dstOffset = dstAllocOffset + srcAllocSize;
    13111  m_BytesMoved += srcAllocSize;
    13112  ++m_AllocationsMoved;
    13113 
    13114  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
    13115  ++nextSuballocIt;
    13116  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
    13117  srcSuballocIt = nextSuballocIt;
    13118 
    13119  pDstMetadata->m_Suballocations.push_back(suballoc);
    13120 
    13121  VmaDefragmentationMove move = {
    13122  srcOrigBlockIndex, dstOrigBlockIndex,
    13123  srcAllocOffset, dstAllocOffset,
    13124  srcAllocSize };
    13125  moves.push_back(move);
    13126  }
    13127  }
    13128  }
    13129  }
    13130 
    13131  m_BlockInfos.clear();
    13132 
    13133  PostprocessMetadata();
    13134 
    13135  return VK_SUCCESS;
    13136 }
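/*
Summary of the fast algorithm above: blocks are sorted by ascending free
size (most "destination" first) and allocations are compacted front to
back in a single pass, while gaps that cannot be filled immediately are
remembered in FreeSpaceDatabase and reused for later allocations.
*/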
    13137 
    13138 void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata()
    13139 {
    13140  const size_t blockCount = m_pBlockVector->GetBlockCount();
    13141  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    13142  {
    13143  VmaBlockMetadata_Generic* const pMetadata =
    13144  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
    13145  pMetadata->m_FreeCount = 0;
    13146  pMetadata->m_SumFreeSize = pMetadata->GetSize();
    13147  pMetadata->m_FreeSuballocationsBySize.clear();
    13148  for(VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
    13149  it != pMetadata->m_Suballocations.end(); )
    13150  {
    13151  if(it->type == VMA_SUBALLOCATION_TYPE_FREE)
    13152  {
    13153  VmaSuballocationList::iterator nextIt = it;
    13154  ++nextIt;
    13155  pMetadata->m_Suballocations.erase(it);
    13156  it = nextIt;
    13157  }
    13158  else
    13159  {
    13160  ++it;
    13161  }
    13162  }
    13163  }
    13164 }
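/*
PreprocessMetadata() above strips all VMA_SUBALLOCATION_TYPE_FREE entries
and resets the free-space bookkeeping, leaving only real allocations in
each block's suballocation list. PostprocessMetadata() below rebuilds the
free entries and m_FreeSuballocationsBySize once the moves are decided.
*/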
    13165 
    13166 void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata()
    13167 {
    13168  const size_t blockCount = m_pBlockVector->GetBlockCount();
    13169  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    13170  {
    13171  VmaBlockMetadata_Generic* const pMetadata =
    13172  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
    13173  const VkDeviceSize blockSize = pMetadata->GetSize();
    13174 
    13175  // No allocations in this block - entire area is free.
    13176  if(pMetadata->m_Suballocations.empty())
    13177  {
    13178  pMetadata->m_FreeCount = 1;
    13179  //pMetadata->m_SumFreeSize is already set to blockSize.
    13180  VmaSuballocation suballoc = {
    13181  0, // offset
    13182  blockSize, // size
    13183  VMA_NULL, // hAllocation
    13184  VMA_SUBALLOCATION_TYPE_FREE };
    13185  pMetadata->m_Suballocations.push_back(suballoc);
    13186  pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin());
    13187  }
    13188  // There are some allocations in this block.
    13189  else
    13190  {
    13191  VkDeviceSize offset = 0;
    13192  VmaSuballocationList::iterator it;
    13193  for(it = pMetadata->m_Suballocations.begin();
    13194  it != pMetadata->m_Suballocations.end();
    13195  ++it)
    13196  {
    13197  VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE);
    13198  VMA_ASSERT(it->offset >= offset);
    13199 
    13200  // Need to insert preceding free space.
    13201  if(it->offset > offset)
    13202  {
    13203  ++pMetadata->m_FreeCount;
    13204  const VkDeviceSize freeSize = it->offset - offset;
    13205  VmaSuballocation suballoc = {
    13206  offset, // offset
    13207  freeSize, // size
    13208  VMA_NULL, // hAllocation
    13209  VMA_SUBALLOCATION_TYPE_FREE };
    13210  VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
    13211  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    13212  {
    13213  pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt);
    13214  }
    13215  }
    13216 
    13217  pMetadata->m_SumFreeSize -= it->size;
    13218  offset = it->offset + it->size;
    13219  }
    13220 
    13221  // Need to insert trailing free space.
    13222  if(offset < blockSize)
    13223  {
    13224  ++pMetadata->m_FreeCount;
    13225  const VkDeviceSize freeSize = blockSize - offset;
    13226  VmaSuballocation suballoc = {
    13227  offset, // offset
    13228  freeSize, // size
    13229  VMA_NULL, // hAllocation
    13230  VMA_SUBALLOCATION_TYPE_FREE };
    13231  VMA_ASSERT(it == pMetadata->m_Suballocations.end());
    13232  VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
    13233  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    13234  {
    13235  pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt);
    13236  }
    13237  }
    13238 
    13239  VMA_SORT(
    13240  pMetadata->m_FreeSuballocationsBySize.begin(),
    13241  pMetadata->m_FreeSuballocationsBySize.end(),
    13242  VmaSuballocationItemSizeLess());
    13243  }
    13244 
    13245  VMA_HEAVY_ASSERT(pMetadata->Validate());
    13246  }
    13247 }
    13248 
    13249 void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc)
    13250 {
    13251  // TODO: Optimize somehow, e.g. remember the iterator instead of searching for it linearly.
    13252  VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
    13253  // Advance past suballocations that start before the new one, stopping at the first
    13254  // one with offset >= suballoc.offset, so the list stays sorted by offset.
    13255  while(it != pMetadata->m_Suballocations.end() &&
    13256  it->offset < suballoc.offset)
    13257  {
    13258  ++it;
    13259  }
    13260  pMetadata->m_Suballocations.insert(it, suballoc);
    13261 }
    13262 
    13264 // VmaBlockVectorDefragmentationContext
    13265 
    13266 VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
    13267  VmaAllocator hAllocator,
    13268  VmaPool hCustomPool,
    13269  VmaBlockVector* pBlockVector,
    13270  uint32_t currFrameIndex,
    13271  uint32_t algorithmFlags) :
    13272  res(VK_SUCCESS),
    13273  mutexLocked(false),
    13274  blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
    13275  m_hAllocator(hAllocator),
    13276  m_hCustomPool(hCustomPool),
    13277  m_pBlockVector(pBlockVector),
    13278  m_CurrFrameIndex(currFrameIndex),
    13279  m_AlgorithmFlags(algorithmFlags),
    13280  m_pAlgorithm(VMA_NULL),
    13281  m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
    13282  m_AllAllocations(false)
    13283 {
    13284 }
    13285 
    13286 VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext()
    13287 {
    13288  vma_delete(m_hAllocator, m_pAlgorithm);
    13289 }
    13290 
    13291 void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
    13292 {
    13293  AllocInfo info = { hAlloc, pChanged };
    13294  m_Allocations.push_back(info);
    13295 }
    13296 
    13297 void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported)
    13298 {
    13299  const bool allAllocations = m_AllAllocations ||
    13300  m_Allocations.size() == m_pBlockVector->CalcAllocationCount();
    13301 
    13302  /********************************
    13303  HERE IS THE CHOICE OF DEFRAGMENTATION ALGORITHM.
    13304  ********************************/
    13305 
    13306  /*
    13307  Fast algorithm is supported only when certain criteria are met:
    13308  - VMA_DEBUG_MARGIN is 0.
    13309  - All allocations in this block vector are moveable.
    13310  - There is no possibility of image/buffer granularity conflict.
    13311  */
    13312  if(VMA_DEBUG_MARGIN == 0 &&
    13313  allAllocations &&
    13314  !m_pBlockVector->IsBufferImageGranularityConflictPossible())
    13315  {
    13316  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)(
    13317  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
    13318  }
    13319  else
    13320  {
    13321  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)(
    13322  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
    13323  }
    13324 
    13325  if(allAllocations)
    13326  {
    13327  m_pAlgorithm->AddAll();
    13328  }
    13329  else
    13330  {
    13331  for(size_t i = 0, count = m_Allocations.size(); i < count; ++i)
    13332  {
    13333  m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged);
    13334  }
    13335  }
    13336 }
    13337 
    13339 // VmaDefragmentationContext
    13340 
    13341 VmaDefragmentationContext_T::VmaDefragmentationContext_T(
    13342  VmaAllocator hAllocator,
    13343  uint32_t currFrameIndex,
    13344  uint32_t flags,
    13345  VmaDefragmentationStats* pStats) :
    13346  m_hAllocator(hAllocator),
    13347  m_CurrFrameIndex(currFrameIndex),
    13348  m_Flags(flags),
    13349  m_pStats(pStats),
    13350  m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks()))
    13351 {
    13352  memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts));
    13353 }
    13354 
    13355 VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
    13356 {
    13357  for(size_t i = m_CustomPoolContexts.size(); i--; )
    13358  {
    13359  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
    13360  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
    13361  vma_delete(m_hAllocator, pBlockVectorCtx);
    13362  }
    13363  for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
    13364  {
    13365  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
    13366  if(pBlockVectorCtx)
    13367  {
    13368  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
    13369  vma_delete(m_hAllocator, pBlockVectorCtx);
    13370  }
    13371  }
    13372 }
    13373 
    13374 void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, VmaPool* pPools)
    13375 {
    13376  for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
    13377  {
    13378  VmaPool pool = pPools[poolIndex];
    13379  VMA_ASSERT(pool);
    13380  // Pools with an algorithm other than the default are not defragmented.
    13381  if(pool->m_BlockVector.GetAlgorithm() == 0)
    13382  {
    13383  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
    13384 
    13385  for(size_t i = m_CustomPoolContexts.size(); i--; )
    13386  {
    13387  if(m_CustomPoolContexts[i]->GetCustomPool() == pool)
    13388  {
    13389  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
    13390  break;
    13391  }
    13392  }
    13393 
    13394  if(!pBlockVectorDefragCtx)
    13395  {
    13396  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
    13397  m_hAllocator,
    13398  pool,
    13399  &pool->m_BlockVector,
    13400  m_CurrFrameIndex,
    13401  m_Flags);
    13402  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
    13403  }
    13404 
    13405  pBlockVectorDefragCtx->AddAll();
    13406  }
    13407  }
    13408 }
    13409 
    13410 void VmaDefragmentationContext_T::AddAllocations(
    13411  uint32_t allocationCount,
    13412  VmaAllocation* pAllocations,
    13413  VkBool32* pAllocationsChanged)
    13414 {
    13415  // Dispatch pAllocations among per-block-vector defragmentation contexts. Create them when necessary.
    13416  for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    13417  {
    13418  const VmaAllocation hAlloc = pAllocations[allocIndex];
    13419  VMA_ASSERT(hAlloc);
    13420  // DedicatedAlloc cannot be defragmented.
    13421  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
    13422  // Lost allocation cannot be defragmented.
    13423  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
    13424  {
    13425  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
    13426 
    13427  const VmaPool hAllocPool = hAlloc->GetBlock()->GetParentPool();
    13428  // This allocation belongs to a custom pool.
    13429  if(hAllocPool != VK_NULL_HANDLE)
    13430  {
    13431  // Pools with an algorithm other than the default are not defragmented.
    13432  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
    13433  {
    13434  for(size_t i = m_CustomPoolContexts.size(); i--; )
    13435  {
    13436  if(m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool)
    13437  {
    13438  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
    13439  break;
    13440  }
    13441  }
    13442  if(!pBlockVectorDefragCtx)
    13443  {
    13444  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
    13445  m_hAllocator,
    13446  hAllocPool,
    13447  &hAllocPool->m_BlockVector,
    13448  m_CurrFrameIndex,
    13449  m_Flags);
    13450  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
    13451  }
    13452  }
    13453  }
    13454  // This allocation belongs to the default pool.
    13455  else
    13456  {
    13457  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
    13458  pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
    13459  if(!pBlockVectorDefragCtx)
    13460  {
    13461  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
    13462  m_hAllocator,
    13463  VMA_NULL, // hCustomPool
    13464  m_hAllocator->m_pBlockVectors[memTypeIndex],
    13465  m_CurrFrameIndex,
    13466  m_Flags);
    13467  m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx;
    13468  }
    13469  }
    13470 
    13471  if(pBlockVectorDefragCtx)
    13472  {
    13473  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
    13474  &pAllocationsChanged[allocIndex] : VMA_NULL;
    13475  pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged);
    13476  }
    13477  }
    13478  }
    13479 }
    13480 
    13481 VkResult VmaDefragmentationContext_T::Defragment(
    13482  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
    13483  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
    13484  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats)
    13485 {
    13486  if(pStats)
    13487  {
    13488  memset(pStats, 0, sizeof(VmaDefragmentationStats));
    13489  }
    13490 
    13491  if(commandBuffer == VK_NULL_HANDLE)
    13492  {
    13493  maxGpuBytesToMove = 0;
    13494  maxGpuAllocationsToMove = 0;
    13495  }
    13496 
    13497  VkResult res = VK_SUCCESS;
    13498 
    13499  // Process default pools.
    13500  for(uint32_t memTypeIndex = 0;
    13501  memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS;
    13502  ++memTypeIndex)
    13503  {
    13504  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
    13505  if(pBlockVectorCtx)
    13506  {
    13507  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
    13508  pBlockVectorCtx->GetBlockVector()->Defragment(
    13509  pBlockVectorCtx,
    13510  pStats,
    13511  maxCpuBytesToMove, maxCpuAllocationsToMove,
    13512  maxGpuBytesToMove, maxGpuAllocationsToMove,
    13513  commandBuffer);
    13514  if(pBlockVectorCtx->res != VK_SUCCESS)
    13515  {
    13516  res = pBlockVectorCtx->res;
    13517  }
    13518  }
    13519  }
    13520 
    13521  // Process custom pools.
    13522  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
    13523  customCtxIndex < customCtxCount && res >= VK_SUCCESS;
    13524  ++customCtxIndex)
    13525  {
    13526  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
    13527  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
    13528  pBlockVectorCtx->GetBlockVector()->Defragment(
    13529  pBlockVectorCtx,
    13530  pStats,
    13531  maxCpuBytesToMove, maxCpuAllocationsToMove,
    13532  maxGpuBytesToMove, maxGpuAllocationsToMove,
    13533  commandBuffer);
    13534  if(pBlockVectorCtx->res != VK_SUCCESS)
    13535  {
    13536  res = pBlockVectorCtx->res;
    13537  }
    13538  }
    13539 
    13540  return res;
    13541 }
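/*
A minimal sketch of driving the GPU path of this function from the public
API (illustrative only; 'allocator', 'allocs', 'allocCount' and 'cmdBuf'
are assumed to exist, with 'cmdBuf' in the recording state):

    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = allocCount;
    defragInfo.pAllocations = allocs;
    defragInfo.maxGpuBytesToMove = VK_WHOLE_SIZE;
    defragInfo.maxGpuAllocationsToMove = UINT32_MAX;
    defragInfo.commandBuffer = cmdBuf;

    VmaDefragmentationContext defragCtx = VMA_NULL;
    vmaDefragmentationBegin(allocator, &defragInfo, VMA_NULL, &defragCtx);
    // End recording, submit cmdBuf, and wait for it to finish, then:
    vmaDefragmentationEnd(allocator, defragCtx);
*/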
    13542 
    13544 // VmaRecorder
    13545 
    13546 #if VMA_RECORDING_ENABLED
    13547 
    13548 VmaRecorder::VmaRecorder() :
    13549  m_UseMutex(true),
    13550  m_Flags(0),
    13551  m_File(VMA_NULL),
    13552  m_Freq(INT64_MAX),
    13553  m_StartCounter(INT64_MAX)
    13554 {
    13555 }
    13556 
    13557 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
    13558 {
    13559  m_UseMutex = useMutex;
    13560  m_Flags = settings.flags;
    13561 
    13562  QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
    13563  QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);
    13564 
    13565  // Open file for writing.
    13566  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
    13567  if(err != 0)
    13568  {
    13569  return VK_ERROR_INITIALIZATION_FAILED;
    13570  }
    13571 
    13572  // Write header.
    13573  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
    13574  fprintf(m_File, "%s\n", "1,5");
    13575 
    13576  return VK_SUCCESS;
    13577 }
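/*
Illustrative usage sketch (not part of the library): recording is enabled by filling
VmaRecordSettings and pointing VmaAllocatorCreateInfo::pRecordSettings at it before
vmaCreateAllocator. Note this implementation is Windows-only (QueryPerformanceCounter,
fopen_s). The file path below is an assumption:

    VmaRecordSettings recordSettings = {};
    recordSettings.flags = VMA_RECORD_FLUSH_AFTER_CALL_BIT;
    recordSettings.pFilePath = "vma_calls.csv";
    allocatorCreateInfo.pRecordSettings = &recordSettings;
*/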
    13578 
    13579 VmaRecorder::~VmaRecorder()
    13580 {
    13581  if(m_File != VMA_NULL)
    13582  {
    13583  fclose(m_File);
    13584  }
    13585 }
    13586 
    13587 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
    13588 {
    13589  CallParams callParams;
    13590  GetBasicParams(callParams);
    13591 
    13592  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13593  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
    13594  Flush();
    13595 }
    13596 
    13597 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
    13598 {
    13599  CallParams callParams;
    13600  GetBasicParams(callParams);
    13601 
    13602  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13603  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
    13604  Flush();
    13605 }
    13606 
    13607 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
    13608 {
    13609  CallParams callParams;
    13610  GetBasicParams(callParams);
    13611 
    13612  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13613  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
    13614  createInfo.memoryTypeIndex,
    13615  createInfo.flags,
    13616  createInfo.blockSize,
    13617  (uint64_t)createInfo.minBlockCount,
    13618  (uint64_t)createInfo.maxBlockCount,
    13619  createInfo.frameInUseCount,
    13620  pool);
    13621  Flush();
    13622 }
    13623 
    13624 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
    13625 {
    13626  CallParams callParams;
    13627  GetBasicParams(callParams);
    13628 
    13629  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13630  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
    13631  pool);
    13632  Flush();
    13633 }
    13634 
    13635 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
    13636  const VkMemoryRequirements& vkMemReq,
    13637  const VmaAllocationCreateInfo& createInfo,
    13638  VmaAllocation allocation)
    13639 {
    13640  CallParams callParams;
    13641  GetBasicParams(callParams);
    13642 
    13643  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13644  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    13645  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    13646  vkMemReq.size,
    13647  vkMemReq.alignment,
    13648  vkMemReq.memoryTypeBits,
    13649  createInfo.flags,
    13650  createInfo.usage,
    13651  createInfo.requiredFlags,
    13652  createInfo.preferredFlags,
    13653  createInfo.memoryTypeBits,
    13654  createInfo.pool,
    13655  allocation,
    13656  userDataStr.GetString());
    13657  Flush();
    13658 }
    13659 
    13660 void VmaRecorder::RecordAllocateMemoryPages(uint32_t frameIndex,
    13661  const VkMemoryRequirements& vkMemReq,
    13662  const VmaAllocationCreateInfo& createInfo,
    13663  uint64_t allocationCount,
    13664  const VmaAllocation* pAllocations)
    13665 {
    13666  CallParams callParams;
    13667  GetBasicParams(callParams);
    13668 
    13669  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13670  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    13671  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryPages,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,", callParams.threadId, callParams.time, frameIndex,
    13672  vkMemReq.size,
    13673  vkMemReq.alignment,
    13674  vkMemReq.memoryTypeBits,
    13675  createInfo.flags,
    13676  createInfo.usage,
    13677  createInfo.requiredFlags,
    13678  createInfo.preferredFlags,
    13679  createInfo.memoryTypeBits,
    13680  createInfo.pool);
    13681  PrintPointerList(allocationCount, pAllocations);
    13682  fprintf(m_File, ",%s\n", userDataStr.GetString());
    13683  Flush();
    13684 }
    13685 
    13686 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
    13687  const VkMemoryRequirements& vkMemReq,
    13688  bool requiresDedicatedAllocation,
    13689  bool prefersDedicatedAllocation,
    13690  const VmaAllocationCreateInfo& createInfo,
    13691  VmaAllocation allocation)
    13692 {
    13693  CallParams callParams;
    13694  GetBasicParams(callParams);
    13695 
    13696  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13697  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    13698  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    13699  vkMemReq.size,
    13700  vkMemReq.alignment,
    13701  vkMemReq.memoryTypeBits,
    13702  requiresDedicatedAllocation ? 1 : 0,
    13703  prefersDedicatedAllocation ? 1 : 0,
    13704  createInfo.flags,
    13705  createInfo.usage,
    13706  createInfo.requiredFlags,
    13707  createInfo.preferredFlags,
    13708  createInfo.memoryTypeBits,
    13709  createInfo.pool,
    13710  allocation,
    13711  userDataStr.GetString());
    13712  Flush();
    13713 }
    13714 
    13715 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
    13716  const VkMemoryRequirements& vkMemReq,
    13717  bool requiresDedicatedAllocation,
    13718  bool prefersDedicatedAllocation,
    13719  const VmaAllocationCreateInfo& createInfo,
    13720  VmaAllocation allocation)
    13721 {
    13722  CallParams callParams;
    13723  GetBasicParams(callParams);
    13724 
    13725  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13726  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    13727  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    13728  vkMemReq.size,
    13729  vkMemReq.alignment,
    13730  vkMemReq.memoryTypeBits,
    13731  requiresDedicatedAllocation ? 1 : 0,
    13732  prefersDedicatedAllocation ? 1 : 0,
    13733  createInfo.flags,
    13734  createInfo.usage,
    13735  createInfo.requiredFlags,
    13736  createInfo.preferredFlags,
    13737  createInfo.memoryTypeBits,
    13738  createInfo.pool,
    13739  allocation,
    13740  userDataStr.GetString());
    13741  Flush();
    13742 }
    13743 
    13744 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
    13745  VmaAllocation allocation)
    13746 {
    13747  CallParams callParams;
    13748  GetBasicParams(callParams);
    13749 
    13750  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13751  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    13752  allocation);
    13753  Flush();
    13754 }
    13755 
    13756 void VmaRecorder::RecordFreeMemoryPages(uint32_t frameIndex,
    13757  uint64_t allocationCount,
    13758  const VmaAllocation* pAllocations)
    13759 {
    13760  CallParams callParams;
    13761  GetBasicParams(callParams);
    13762 
    13763  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13764  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemoryPages,", callParams.threadId, callParams.time, frameIndex);
    13765  PrintPointerList(allocationCount, pAllocations);
    13766  fprintf(m_File, "\n");
    13767  Flush();
    13768 }
    13769 
    13770 void VmaRecorder::RecordResizeAllocation(
    13771  uint32_t frameIndex,
    13772  VmaAllocation allocation,
    13773  VkDeviceSize newSize)
    13774 {
    13775  CallParams callParams;
    13776  GetBasicParams(callParams);
    13777 
    13778  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13779  fprintf(m_File, "%u,%.3f,%u,vmaResizeAllocation,%p,%llu\n", callParams.threadId, callParams.time, frameIndex,
    13780  allocation, newSize);
    13781  Flush();
    13782 }
    13783 
    13784 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
    13785  VmaAllocation allocation,
    13786  const void* pUserData)
    13787 {
    13788  CallParams callParams;
    13789  GetBasicParams(callParams);
    13790 
    13791  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13792  UserDataString userDataStr(
    13793  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
    13794  pUserData);
    13795  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    13796  allocation,
    13797  userDataStr.GetString());
    13798  Flush();
    13799 }
    13800 
    13801 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
    13802  VmaAllocation allocation)
    13803 {
    13804  CallParams callParams;
    13805  GetBasicParams(callParams);
    13806 
    13807  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13808  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
    13809  allocation);
    13810  Flush();
    13811 }
    13812 
    13813 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
    13814  VmaAllocation allocation)
    13815 {
    13816  CallParams callParams;
    13817  GetBasicParams(callParams);
    13818 
    13819  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13820  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    13821  allocation);
    13822  Flush();
    13823 }
    13824 
    13825 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
    13826  VmaAllocation allocation)
    13827 {
    13828  CallParams callParams;
    13829  GetBasicParams(callParams);
    13830 
    13831  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13832  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    13833  allocation);
    13834  Flush();
    13835 }
    13836 
    13837 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
    13838  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    13839 {
    13840  CallParams callParams;
    13841  GetBasicParams(callParams);
    13842 
    13843  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13844  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
    13845  allocation,
    13846  offset,
    13847  size);
    13848  Flush();
    13849 }
    13850 
    13851 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
    13852  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    13853 {
    13854  CallParams callParams;
    13855  GetBasicParams(callParams);
    13856 
    13857  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13858  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
    13859  allocation,
    13860  offset,
    13861  size);
    13862  Flush();
    13863 }
    13864 
    13865 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
    13866  const VkBufferCreateInfo& bufCreateInfo,
    13867  const VmaAllocationCreateInfo& allocCreateInfo,
    13868  VmaAllocation allocation)
    13869 {
    13870  CallParams callParams;
    13871  GetBasicParams(callParams);
    13872 
    13873  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13874  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    13875  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    13876  bufCreateInfo.flags,
    13877  bufCreateInfo.size,
    13878  bufCreateInfo.usage,
    13879  bufCreateInfo.sharingMode,
    13880  allocCreateInfo.flags,
    13881  allocCreateInfo.usage,
    13882  allocCreateInfo.requiredFlags,
    13883  allocCreateInfo.preferredFlags,
    13884  allocCreateInfo.memoryTypeBits,
    13885  allocCreateInfo.pool,
    13886  allocation,
    13887  userDataStr.GetString());
    13888  Flush();
    13889 }
    13890 
    13891 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
    13892  const VkImageCreateInfo& imageCreateInfo,
    13893  const VmaAllocationCreateInfo& allocCreateInfo,
    13894  VmaAllocation allocation)
    13895 {
    13896  CallParams callParams;
    13897  GetBasicParams(callParams);
    13898 
    13899  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13900  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    13901  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    13902  imageCreateInfo.flags,
    13903  imageCreateInfo.imageType,
    13904  imageCreateInfo.format,
    13905  imageCreateInfo.extent.width,
    13906  imageCreateInfo.extent.height,
    13907  imageCreateInfo.extent.depth,
    13908  imageCreateInfo.mipLevels,
    13909  imageCreateInfo.arrayLayers,
    13910  imageCreateInfo.samples,
    13911  imageCreateInfo.tiling,
    13912  imageCreateInfo.usage,
    13913  imageCreateInfo.sharingMode,
    13914  imageCreateInfo.initialLayout,
    13915  allocCreateInfo.flags,
    13916  allocCreateInfo.usage,
    13917  allocCreateInfo.requiredFlags,
    13918  allocCreateInfo.preferredFlags,
    13919  allocCreateInfo.memoryTypeBits,
    13920  allocCreateInfo.pool,
    13921  allocation,
    13922  userDataStr.GetString());
    13923  Flush();
    13924 }
    13925 
    13926 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
    13927  VmaAllocation allocation)
    13928 {
    13929  CallParams callParams;
    13930  GetBasicParams(callParams);
    13931 
    13932  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13933  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
    13934  allocation);
    13935  Flush();
    13936 }
    13937 
    13938 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
    13939  VmaAllocation allocation)
    13940 {
    13941  CallParams callParams;
    13942  GetBasicParams(callParams);
    13943 
    13944  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13945  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
    13946  allocation);
    13947  Flush();
    13948 }
    13949 
    13950 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
    13951  VmaAllocation allocation)
    13952 {
    13953  CallParams callParams;
    13954  GetBasicParams(callParams);
    13955 
    13956  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13957  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
    13958  allocation);
    13959  Flush();
    13960 }
    13961 
    13962 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
    13963  VmaAllocation allocation)
    13964 {
    13965  CallParams callParams;
    13966  GetBasicParams(callParams);
    13967 
    13968  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13969  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
    13970  allocation);
    13971  Flush();
    13972 }
    13973 
    13974 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
    13975  VmaPool pool)
    13976 {
    13977  CallParams callParams;
    13978  GetBasicParams(callParams);
    13979 
    13980  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13981  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
    13982  pool);
    13983  Flush();
    13984 }
    13985 
    13986 void VmaRecorder::RecordDefragmentationBegin(uint32_t frameIndex,
    13987  const VmaDefragmentationInfo2& info,
    13988  VmaDefragmentationContext ctx)
    13989 {
    13990  CallParams callParams;
    13991  GetBasicParams(callParams);
    13992 
    13993  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13994  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationBegin,%u,", callParams.threadId, callParams.time, frameIndex,
    13995  info.flags);
    13996  PrintPointerList(info.allocationCount, info.pAllocations);
    13997  fprintf(m_File, ",");
    13998  PrintPointerList(info.poolCount, info.pPools);
    13999  fprintf(m_File, ",%llu,%u,%llu,%u,%p,%p\n",
    14000  info.maxCpuBytesToMove,
    14001  info.maxCpuAllocationsToMove,
    14002  info.maxGpuBytesToMove,
    14003  info.maxGpuAllocationsToMove,
    14004  info.commandBuffer,
    14005  ctx);
    14006  Flush();
    14007 }
    14008 
    14009 void VmaRecorder::RecordDefragmentationEnd(uint32_t frameIndex,
    14010  VmaDefragmentationContext ctx)
    14011 {
    14012  CallParams callParams;
    14013  GetBasicParams(callParams);
    14014 
    14015  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    14016  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationEnd,%p\n", callParams.threadId, callParams.time, frameIndex,
    14017  ctx);
    14018  Flush();
    14019 }
    14020 
    14021 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
    14022 {
    14023  if(pUserData != VMA_NULL)
    14024  {
    14025  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
    14026  {
    14027  m_Str = (const char*)pUserData;
    14028  }
    14029  else
    14030  {
    14031  sprintf_s(m_PtrStr, "%p", pUserData);
    14032  m_Str = m_PtrStr;
    14033  }
    14034  }
    14035  else
    14036  {
    14037  m_Str = "";
    14038  }
    14039 }
    14040 
    14041 void VmaRecorder::WriteConfiguration(
    14042  const VkPhysicalDeviceProperties& devProps,
    14043  const VkPhysicalDeviceMemoryProperties& memProps,
    14044  bool dedicatedAllocationExtensionEnabled)
    14045 {
    14046  fprintf(m_File, "Config,Begin\n");
    14047 
    14048  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
    14049  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
    14050  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
    14051  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
    14052  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
    14053  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
    14054 
    14055  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
    14056  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
    14057  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
    14058 
    14059  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
    14060  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
    14061  {
    14062  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
    14063  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
    14064  }
    14065  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
    14066  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
    14067  {
    14068  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
    14069  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
    14070  }
    14071 
    14072  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
    14073 
    14074  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
    14075  fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
    14076  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
    14077  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
    14078  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
    14079  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
    14080  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
    14081  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
    14082  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
    14083 
    14084  fprintf(m_File, "Config,End\n");
    14085 }
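/*
For reference, the configuration block written above has this shape (all values are
invented example data, not real output):

    Config,Begin
    PhysicalDevice,apiVersion,4198400
    PhysicalDevice,deviceName,Example GPU
    PhysicalDeviceMemory,HeapCount,2
    PhysicalDeviceMemory,Heap,0,size,8589934592
    Extension,VK_KHR_dedicated_allocation,1
    Config,End
*/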
    14086 
    14087 void VmaRecorder::GetBasicParams(CallParams& outParams)
    14088 {
    14089  outParams.threadId = GetCurrentThreadId();
    14090 
    14091  LARGE_INTEGER counter;
    14092  QueryPerformanceCounter(&counter);
    14093  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
    14094 }
    14095 
    14096 void VmaRecorder::PrintPointerList(uint64_t count, const VmaAllocation* pItems)
    14097 {
    14098  if(count)
    14099  {
    14100  fprintf(m_File, "%p", pItems[0]);
    14101  for(uint64_t i = 1; i < count; ++i)
    14102  {
    14103  fprintf(m_File, " %p", pItems[i]);
    14104  }
    14105  }
    14106 }
    14107 
    14108 void VmaRecorder::Flush()
    14109 {
    14110  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
    14111  {
    14112  fflush(m_File);
    14113  }
    14114 }
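/*
Each Record* method above therefore emits one CSV line of the form
threadId,time,frameIndex,functionName[,args...], for example (values invented):

    7512,0.725,60,vmaMapMemory,000001D85A8C5D80
*/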
    14115 
    14116 #endif // #if VMA_RECORDING_ENABLED
    14117 
    14118 ////////////////////////////////////////////////////////////////////////////////
    14119 // VmaAllocationObjectAllocator
    14120 
    14121 VmaAllocationObjectAllocator::VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks) :
    14122  m_Allocator(pAllocationCallbacks, 1024)
    14123 {
    14124 }
    14125 
    14126 VmaAllocation VmaAllocationObjectAllocator::Allocate()
    14127 {
    14128  VmaMutexLock mutexLock(m_Mutex);
    14129  return m_Allocator.Alloc();
    14130 }
    14131 
    14132 void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc)
    14133 {
    14134  VmaMutexLock mutexLock(m_Mutex);
    14135  m_Allocator.Free(hAlloc);
    14136 }
    14137 
    14138 ////////////////////////////////////////////////////////////////////////////////
    14139 // VmaAllocator_T
    14140 
    14141 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
    14142  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
    14143  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
    14144  m_hDevice(pCreateInfo->device),
    14145  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
    14146  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
    14147  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
    14148  m_AllocationObjectAllocator(&m_AllocationCallbacks),
    14149  m_PreferredLargeHeapBlockSize(0),
    14150  m_PhysicalDevice(pCreateInfo->physicalDevice),
    14151  m_CurrentFrameIndex(0),
    14152  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
    14153  m_NextPoolId(0)
    14154 #if VMA_RECORDING_ENABLED
    14155  ,m_pRecorder(VMA_NULL)
    14156 #endif
    14157 {
    14158  if(VMA_DEBUG_DETECT_CORRUPTION)
    14159  {
    14160  // Needs to be multiple of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
    14161  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
    14162  }
    14163 
    14164  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
    14165 
    14166 #if !(VMA_DEDICATED_ALLOCATION)
    14167  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
    14168  {
    14169  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
    14170  }
    14171 #endif
    14172 
    14173  memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
    14174  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
    14175  memset(&m_MemProps, 0, sizeof(m_MemProps));
    14176 
    14177  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
    14178  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
    14179 
    14180  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    14181  {
    14182  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
    14183  }
    14184 
    14185  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
    14186  {
    14187  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
    14188  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
    14189  }
    14190 
    14191  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
    14192 
    14193  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
    14194  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
    14195 
    14196  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
    14197  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
    14198  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
    14199  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
    14200 
    14201  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
    14202  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
    14203 
    14204  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
    14205  {
    14206  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
    14207  {
    14208  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
    14209  if(limit != VK_WHOLE_SIZE)
    14210  {
    14211  m_HeapSizeLimit[heapIndex] = limit;
    14212  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
    14213  {
    14214  m_MemProps.memoryHeaps[heapIndex].size = limit;
    14215  }
    14216  }
    14217  }
    14218  }
    14219 
    14220  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    14221  {
    14222  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
    14223 
    14224  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
    14225  this,
    14226  VK_NULL_HANDLE, // hParentPool
    14227  memTypeIndex,
    14228  preferredBlockSize,
    14229  0,
    14230  SIZE_MAX,
    14231  GetBufferImageGranularity(),
    14232  pCreateInfo->frameInUseCount,
    14233  false, // isCustomPool
    14234  false, // explicitBlockSize
    14235  false); // linearAlgorithm
    14236  // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
    14237  // because minBlockCount is 0.
    14238  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
    14239 
    14240  }
    14241 }
    14242 
    14243 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
    14244 {
    14245  VkResult res = VK_SUCCESS;
    14246 
    14247  if(pCreateInfo->pRecordSettings != VMA_NULL &&
    14248  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
    14249  {
    14250 #if VMA_RECORDING_ENABLED
    14251  m_pRecorder = vma_new(this, VmaRecorder)();
    14252  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
    14253  if(res != VK_SUCCESS)
    14254  {
    14255  return res;
    14256  }
    14257  m_pRecorder->WriteConfiguration(
    14258  m_PhysicalDeviceProperties,
    14259  m_MemProps,
    14260  m_UseKhrDedicatedAllocation);
    14261  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
    14262 #else
    14263  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
    14264  return VK_ERROR_FEATURE_NOT_PRESENT;
    14265 #endif
    14266  }
    14267 
    14268  return res;
    14269 }
    14270 
    14271 VmaAllocator_T::~VmaAllocator_T()
    14272 {
    14273 #if VMA_RECORDING_ENABLED
    14274  if(m_pRecorder != VMA_NULL)
    14275  {
    14276  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
    14277  vma_delete(this, m_pRecorder);
    14278  }
    14279 #endif
    14280 
    14281  VMA_ASSERT(m_Pools.empty());
    14282 
    14283  for(size_t i = GetMemoryTypeCount(); i--; )
    14284  {
    14285  if(m_pDedicatedAllocations[i] != VMA_NULL && !m_pDedicatedAllocations[i]->empty())
    14286  {
    14287  VMA_ASSERT(0 && "Unfreed dedicated allocations found.");
    14288  }
    14289 
    14290  vma_delete(this, m_pDedicatedAllocations[i]);
    14291  vma_delete(this, m_pBlockVectors[i]);
    14292  }
    14293 }
    14294 
    14295 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
    14296 {
    14297 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
    14298  m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties;
    14299  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties;
    14300  m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
    14301  m_VulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkFreeMemory;
    14302  m_VulkanFunctions.vkMapMemory = (PFN_vkMapMemory)vkMapMemory;
    14303  m_VulkanFunctions.vkUnmapMemory = (PFN_vkUnmapMemory)vkUnmapMemory;
    14304  m_VulkanFunctions.vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkFlushMappedMemoryRanges;
    14305  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkInvalidateMappedMemoryRanges;
    14306  m_VulkanFunctions.vkBindBufferMemory = (PFN_vkBindBufferMemory)vkBindBufferMemory;
    14307  m_VulkanFunctions.vkBindImageMemory = (PFN_vkBindImageMemory)vkBindImageMemory;
    14308  m_VulkanFunctions.vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetBufferMemoryRequirements;
    14309  m_VulkanFunctions.vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetImageMemoryRequirements;
    14310  m_VulkanFunctions.vkCreateBuffer = (PFN_vkCreateBuffer)vkCreateBuffer;
    14311  m_VulkanFunctions.vkDestroyBuffer = (PFN_vkDestroyBuffer)vkDestroyBuffer;
    14312  m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage;
    14313  m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage;
    14314  m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer;
    14315 #if VMA_DEDICATED_ALLOCATION
    14316  if(m_UseKhrDedicatedAllocation)
    14317  {
    14318  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
    14319  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
    14320  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
    14321  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
    14322  }
    14323 #endif // #if VMA_DEDICATED_ALLOCATION
    14324 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
    14325 
    14326 #define VMA_COPY_IF_NOT_NULL(funcName) \
    14327  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
    14328 
    14329  if(pVulkanFunctions != VMA_NULL)
    14330  {
    14331  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
    14332  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
    14333  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
    14334  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
    14335  VMA_COPY_IF_NOT_NULL(vkMapMemory);
    14336  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
    14337  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
    14338  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
    14339  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
    14340  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
    14341  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
    14342  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
    14343  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
    14344  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
    14345  VMA_COPY_IF_NOT_NULL(vkCreateImage);
    14346  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
    14347  VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
    14348 #if VMA_DEDICATED_ALLOCATION
    14349  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
    14350  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
    14351 #endif
    14352  }
    14353 
    14354 #undef VMA_COPY_IF_NOT_NULL
    14355 
    14356  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
    14357  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
    14358  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    14359  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    14360  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    14361  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    14362  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    14363  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    14364  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
    14365  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
    14366  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    14367  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    14368  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    14369  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    14370  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    14371  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    14372  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    14373  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
    14374  VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
    14375 #if VMA_DEDICATED_ALLOCATION
    14376  if(m_UseKhrDedicatedAllocation)
    14377  {
    14378  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
    14379  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
    14380  }
    14381 #endif
    14382 }
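/*
Illustrative usage sketch (not part of the library): with VMA_STATIC_VULKAN_FUNCTIONS
defined to 0, the pointers asserted above must be supplied by the caller, e.g. when
linking Vulkan statically (loader-based apps would fetch them via vkGetDeviceProcAddr):

    VmaVulkanFunctions vulkanFunctions = {};
    vulkanFunctions.vkAllocateMemory = vkAllocateMemory;
    vulkanFunctions.vkFreeMemory = vkFreeMemory;
    // ...assign the remaining members the same way...
    allocatorCreateInfo.pVulkanFunctions = &vulkanFunctions;
*/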
    14383 
    14384 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
    14385 {
    14386  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    14387  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
    14388  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
    14389  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
    14390 }
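/*
Worked example, assuming the default macros (VMA_SMALL_HEAP_MAX_SIZE = 1 GiB,
VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE = 256 MiB): a 512 MiB heap counts as small, so its
preferred block size is 512 MiB / 8 = 64 MiB, while a 4 GiB heap gets the full 256 MiB.
*/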
    14391 
    14392 VkResult VmaAllocator_T::AllocateMemoryOfType(
    14393  VkDeviceSize size,
    14394  VkDeviceSize alignment,
    14395  bool dedicatedAllocation,
    14396  VkBuffer dedicatedBuffer,
    14397  VkImage dedicatedImage,
    14398  const VmaAllocationCreateInfo& createInfo,
    14399  uint32_t memTypeIndex,
    14400  VmaSuballocationType suballocType,
    14401  size_t allocationCount,
    14402  VmaAllocation* pAllocations)
    14403 {
    14404  VMA_ASSERT(pAllocations != VMA_NULL);
    14405  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);
    14406 
    14407  VmaAllocationCreateInfo finalCreateInfo = createInfo;
    14408 
    14409  // If memory type is not HOST_VISIBLE, disable MAPPED.
    14410  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    14411  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    14412  {
    14413  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
    14414  }
    14415 
    14416  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
    14417  VMA_ASSERT(blockVector);
    14418 
    14419  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
    14420  bool preferDedicatedMemory =
    14421  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
    14422  dedicatedAllocation ||
    14423  // Heuristics: Allocate dedicated memory if requested size is greater than half of preferred block size.
    14424  size > preferredBlockSize / 2;
    14425 
    14426  if(preferDedicatedMemory &&
    14427  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
    14428  finalCreateInfo.pool == VK_NULL_HANDLE)
    14429  {
    14430  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
    14431  }
    14432 
    14433  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
    14434  {
    14435  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    14436  {
    14437  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14438  }
    14439  else
    14440  {
    14441  return AllocateDedicatedMemory(
    14442  size,
    14443  suballocType,
    14444  memTypeIndex,
    14445  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    14446  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    14447  finalCreateInfo.pUserData,
    14448  dedicatedBuffer,
    14449  dedicatedImage,
    14450  allocationCount,
    14451  pAllocations);
    14452  }
    14453  }
    14454  else
    14455  {
    14456  VkResult res = blockVector->Allocate(
    14457  m_CurrentFrameIndex.load(),
    14458  size,
    14459  alignment,
    14460  finalCreateInfo,
    14461  suballocType,
    14462  allocationCount,
    14463  pAllocations);
    14464  if(res == VK_SUCCESS)
    14465  {
    14466  return res;
    14467  }
    14468 
    14469  // Try dedicated memory.
    14470  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    14471  {
    14472  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14473  }
    14474  else
    14475  {
    14476  res = AllocateDedicatedMemory(
    14477  size,
    14478  suballocType,
    14479  memTypeIndex,
    14480  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    14481  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    14482  finalCreateInfo.pUserData,
    14483  dedicatedBuffer,
    14484  dedicatedImage,
    14485  allocationCount,
    14486  pAllocations);
    14487  if(res == VK_SUCCESS)
    14488  {
    14489  // Succeeded: AllocateDedicatedMemory function already filled pAllocations, nothing more to do here.
    14490  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
    14491  return VK_SUCCESS;
    14492  }
    14493  else
    14494  {
    14495  // Everything failed: Return error code.
    14496  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    14497  return res;
    14498  }
    14499  }
    14500  }
    14501 }
    14502 
    14503 VkResult VmaAllocator_T::AllocateDedicatedMemory(
    14504  VkDeviceSize size,
    14505  VmaSuballocationType suballocType,
    14506  uint32_t memTypeIndex,
    14507  bool map,
    14508  bool isUserDataString,
    14509  void* pUserData,
    14510  VkBuffer dedicatedBuffer,
    14511  VkImage dedicatedImage,
    14512  size_t allocationCount,
    14513  VmaAllocation* pAllocations)
    14514 {
    14515  VMA_ASSERT(allocationCount > 0 && pAllocations);
    14516 
    14517  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    14518  allocInfo.memoryTypeIndex = memTypeIndex;
    14519  allocInfo.allocationSize = size;
    14520 
    14521 #if VMA_DEDICATED_ALLOCATION
    14522  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
    14523  if(m_UseKhrDedicatedAllocation)
    14524  {
    14525  if(dedicatedBuffer != VK_NULL_HANDLE)
    14526  {
    14527  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
    14528  dedicatedAllocInfo.buffer = dedicatedBuffer;
    14529  allocInfo.pNext = &dedicatedAllocInfo;
    14530  }
    14531  else if(dedicatedImage != VK_NULL_HANDLE)
    14532  {
    14533  dedicatedAllocInfo.image = dedicatedImage;
    14534  allocInfo.pNext = &dedicatedAllocInfo;
    14535  }
    14536  }
    14537 #endif // #if VMA_DEDICATED_ALLOCATION
    14538 
    14539  size_t allocIndex;
    14540  VkResult res = VK_SUCCESS;
    14541  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    14542  {
    14543  res = AllocateDedicatedMemoryPage(
    14544  size,
    14545  suballocType,
    14546  memTypeIndex,
    14547  allocInfo,
    14548  map,
    14549  isUserDataString,
    14550  pUserData,
    14551  pAllocations + allocIndex);
    14552  if(res != VK_SUCCESS)
    14553  {
    14554  break;
    14555  }
    14556  }
    14557 
    14558  if(res == VK_SUCCESS)
    14559  {
    14560  // Register them in m_pDedicatedAllocations.
    14561  {
    14562  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    14563  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
    14564  VMA_ASSERT(pDedicatedAllocations);
    14565  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    14566  {
    14567  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, pAllocations[allocIndex]);
    14568  }
    14569  }
    14570 
    14571  VMA_DEBUG_LOG(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
    14572  }
    14573  else
    14574  {
    14575  // Free all already created allocations.
    14576  while(allocIndex--)
    14577  {
    14578  VmaAllocation currAlloc = pAllocations[allocIndex];
    14579  VkDeviceMemory hMemory = currAlloc->GetMemory();
    14580 
    14581  /*
    14582  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
    14583  before vkFreeMemory.
    14584 
    14585  if(currAlloc->GetMappedData() != VMA_NULL)
    14586  {
    14587  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    14588  }
    14589  */
    14590 
    14591  FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);
    14592 
    14593  currAlloc->SetUserData(this, VMA_NULL);
    14594  currAlloc->Dtor();
    14595  m_AllocationObjectAllocator.Free(currAlloc);
    14596  }
    14597 
    14598  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
    14599  }
    14600 
    14601  return res;
    14602 }
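/*
Illustrative usage sketch (not part of the library): this path is reached e.g. when
the caller forces one VkDeviceMemory block per allocation (bufCreateInfo, buf and
alloc are assumptions):

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
    vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, VMA_NULL);
*/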
    14603 
    14604 VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
    14605  VkDeviceSize size,
    14606  VmaSuballocationType suballocType,
    14607  uint32_t memTypeIndex,
    14608  const VkMemoryAllocateInfo& allocInfo,
    14609  bool map,
    14610  bool isUserDataString,
    14611  void* pUserData,
    14612  VmaAllocation* pAllocation)
    14613 {
    14614  VkDeviceMemory hMemory = VK_NULL_HANDLE;
    14615  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
    14616  if(res < 0)
    14617  {
    14618  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    14619  return res;
    14620  }
    14621 
    14622  void* pMappedData = VMA_NULL;
    14623  if(map)
    14624  {
    14625  res = (*m_VulkanFunctions.vkMapMemory)(
    14626  m_hDevice,
    14627  hMemory,
    14628  0,
    14629  VK_WHOLE_SIZE,
    14630  0,
    14631  &pMappedData);
    14632  if(res < 0)
    14633  {
    14634  VMA_DEBUG_LOG(" vkMapMemory FAILED");
    14635  FreeVulkanMemory(memTypeIndex, size, hMemory);
    14636  return res;
    14637  }
    14638  }
    14639 
    14640  *pAllocation = m_AllocationObjectAllocator.Allocate();
    14641  (*pAllocation)->Ctor(m_CurrentFrameIndex.load(), isUserDataString);
    14642  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
    14643  (*pAllocation)->SetUserData(this, pUserData);
    14644  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    14645  {
    14646  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    14647  }
    14648 
    14649  return VK_SUCCESS;
    14650 }
    14651 
    14652 void VmaAllocator_T::GetBufferMemoryRequirements(
    14653  VkBuffer hBuffer,
    14654  VkMemoryRequirements& memReq,
    14655  bool& requiresDedicatedAllocation,
    14656  bool& prefersDedicatedAllocation) const
    14657 {
    14658 #if VMA_DEDICATED_ALLOCATION
    14659  if(m_UseKhrDedicatedAllocation)
    14660  {
    14661  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
    14662  memReqInfo.buffer = hBuffer;
    14663 
    14664  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    14665 
    14666  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    14667  memReq2.pNext = &memDedicatedReq;
    14668 
    14669  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    14670 
    14671  memReq = memReq2.memoryRequirements;
    14672  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    14673  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    14674  }
    14675  else
    14676 #endif // #if VMA_DEDICATED_ALLOCATION
    14677  {
    14678  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
    14679  requiresDedicatedAllocation = false;
    14680  prefersDedicatedAllocation = false;
    14681  }
    14682 }
    14683 
    14684 void VmaAllocator_T::GetImageMemoryRequirements(
    14685  VkImage hImage,
    14686  VkMemoryRequirements& memReq,
    14687  bool& requiresDedicatedAllocation,
    14688  bool& prefersDedicatedAllocation) const
    14689 {
    14690 #if VMA_DEDICATED_ALLOCATION
    14691  if(m_UseKhrDedicatedAllocation)
    14692  {
    14693  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
    14694  memReqInfo.image = hImage;
    14695 
    14696  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    14697 
    14698  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    14699  memReq2.pNext = &memDedicatedReq;
    14700 
    14701  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    14702 
    14703  memReq = memReq2.memoryRequirements;
    14704  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    14705  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    14706  }
    14707  else
    14708 #endif // #if VMA_DEDICATED_ALLOCATION
    14709  {
    14710  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
    14711  requiresDedicatedAllocation = false;
    14712  prefersDedicatedAllocation = false;
    14713  }
    14714 }
    14715 
    14716 VkResult VmaAllocator_T::AllocateMemory(
    14717  const VkMemoryRequirements& vkMemReq,
    14718  bool requiresDedicatedAllocation,
    14719  bool prefersDedicatedAllocation,
    14720  VkBuffer dedicatedBuffer,
    14721  VkImage dedicatedImage,
    14722  const VmaAllocationCreateInfo& createInfo,
    14723  VmaSuballocationType suballocType,
    14724  size_t allocationCount,
    14725  VmaAllocation* pAllocations)
    14726 {
    14727  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
    14728 
    14729  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
    14730 
    14731  if(vkMemReq.size == 0)
    14732  {
    14733  return VK_ERROR_VALIDATION_FAILED_EXT;
    14734  }
    14735  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
    14736  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    14737  {
    14738  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
    14739  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14740  }
    14741  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    14742  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
    14743  {
    14744  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
    14745  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14746  }
    14747  if(requiresDedicatedAllocation)
    14748  {
    14749  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    14750  {
    14751  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
    14752  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14753  }
    14754  if(createInfo.pool != VK_NULL_HANDLE)
    14755  {
    14756  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
    14757  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14758  }
    14759  }
    14760  if((createInfo.pool != VK_NULL_HANDLE) &&
    14761  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
    14762  {
    14763  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
    14764  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14765  }
    14766 
    14767  if(createInfo.pool != VK_NULL_HANDLE)
    14768  {
    14769  const VkDeviceSize alignmentForPool = VMA_MAX(
    14770  vkMemReq.alignment,
    14771  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
    14772  return createInfo.pool->m_BlockVector.Allocate(
    14773  m_CurrentFrameIndex.load(),
    14774  vkMemReq.size,
    14775  alignmentForPool,
    14776  createInfo,
    14777  suballocType,
    14778  allocationCount,
    14779  pAllocations);
    14780  }
    14781  else
    14782  {
    14783  // Bit mask of Vulkan memory types acceptable for this allocation.
    14784  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
    14785  uint32_t memTypeIndex = UINT32_MAX;
    14786  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    14787  if(res == VK_SUCCESS)
    14788  {
    14789  VkDeviceSize alignmentForMemType = VMA_MAX(
    14790  vkMemReq.alignment,
    14791  GetMemoryTypeMinAlignment(memTypeIndex));
    14792 
    14793  res = AllocateMemoryOfType(
    14794  vkMemReq.size,
    14795  alignmentForMemType,
    14796  requiresDedicatedAllocation || prefersDedicatedAllocation,
    14797  dedicatedBuffer,
    14798  dedicatedImage,
    14799  createInfo,
    14800  memTypeIndex,
    14801  suballocType,
    14802  allocationCount,
    14803  pAllocations);
    14804  // Succeeded on first try.
    14805  if(res == VK_SUCCESS)
    14806  {
    14807  return res;
    14808  }
    14809  // Allocation from this memory type failed. Try other compatible memory types.
    14810  else
    14811  {
    14812  for(;;)
    14813  {
    14814  // Remove old memTypeIndex from list of possibilities.
    14815  memoryTypeBits &= ~(1u << memTypeIndex);
    14816  // Find alternative memTypeIndex.
    14817  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    14818  if(res == VK_SUCCESS)
    14819  {
    14820  alignmentForMemType = VMA_MAX(
    14821  vkMemReq.alignment,
    14822  GetMemoryTypeMinAlignment(memTypeIndex));
    14823 
    14824  res = AllocateMemoryOfType(
    14825  vkMemReq.size,
    14826  alignmentForMemType,
    14827  requiresDedicatedAllocation || prefersDedicatedAllocation,
    14828  dedicatedBuffer,
    14829  dedicatedImage,
    14830  createInfo,
    14831  memTypeIndex,
    14832  suballocType,
    14833  allocationCount,
    14834  pAllocations);
    14835  // Allocation from this alternative memory type succeeded.
    14836  if(res == VK_SUCCESS)
    14837  {
    14838  return res;
    14839  }
    14840  // else: Allocation from this memory type failed. Try next one - next loop iteration.
    14841  }
    14842  // No other matching memory type index could be found.
    14843  else
    14844  {
    14845  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
    14846  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14847  }
    14848  }
    14849  }
    14850  }
    14851  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
    14852  else
    14853  return res;
    14854  }
    14855 }
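/*
Worked example of the fallback loop above: if vkMemReq.memoryTypeBits == 0b0110 and
type 1 is selected first but allocation fails, bit 1 is cleared (mask becomes 0b0100)
and type 2 is tried next; once the mask yields no further match, the function returns
VK_ERROR_OUT_OF_DEVICE_MEMORY rather than VK_ERROR_FEATURE_NOT_PRESENT, because an
allocation attempt has already failed.
*/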
    14856 
    14857 void VmaAllocator_T::FreeMemory(
    14858  size_t allocationCount,
    14859  const VmaAllocation* pAllocations)
    14860 {
    14861  VMA_ASSERT(pAllocations);
    14862 
    14863  for(size_t allocIndex = allocationCount; allocIndex--; )
    14864  {
    14865  VmaAllocation allocation = pAllocations[allocIndex];
    14866 
    14867  if(allocation != VK_NULL_HANDLE)
    14868  {
    14869  if(TouchAllocation(allocation))
    14870  {
    14871  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    14872  {
    14873  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
    14874  }
    14875 
    14876  switch(allocation->GetType())
    14877  {
    14878  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    14879  {
    14880  VmaBlockVector* pBlockVector = VMA_NULL;
    14881  VmaPool hPool = allocation->GetBlock()->GetParentPool();
    14882  if(hPool != VK_NULL_HANDLE)
    14883  {
    14884  pBlockVector = &hPool->m_BlockVector;
    14885  }
    14886  else
    14887  {
    14888  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    14889  pBlockVector = m_pBlockVectors[memTypeIndex];
    14890  }
    14891  pBlockVector->Free(allocation);
    14892  }
    14893  break;
    14894  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    14895  FreeDedicatedMemory(allocation);
    14896  break;
    14897  default:
    14898  VMA_ASSERT(0);
    14899  }
    14900  }
    14901 
    14902  allocation->SetUserData(this, VMA_NULL);
    14903  allocation->Dtor();
    14904  m_AllocationObjectAllocator.Free(allocation);
    14905  }
    14906  }
    14907 }
    14908 
    14909 VkResult VmaAllocator_T::ResizeAllocation(
    14910  const VmaAllocation alloc,
    14911  VkDeviceSize newSize)
    14912 {
    14913  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
    14914  {
    14915  return VK_ERROR_VALIDATION_FAILED_EXT;
    14916  }
    14917  if(newSize == alloc->GetSize())
    14918  {
    14919  return VK_SUCCESS;
    14920  }
    14921 
    14922  switch(alloc->GetType())
    14923  {
    14924  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    14925  return VK_ERROR_FEATURE_NOT_PRESENT;
    14926  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    14927  if(alloc->GetBlock()->m_pMetadata->ResizeAllocation(alloc, newSize))
    14928  {
    14929  alloc->ChangeSize(newSize);
    14930  VMA_HEAVY_ASSERT(alloc->GetBlock()->m_pMetadata->Validate());
    14931  return VK_SUCCESS;
    14932  }
    14933  else
    14934  {
    14935  return VK_ERROR_OUT_OF_POOL_MEMORY;
    14936  }
    14937  default:
    14938  VMA_ASSERT(0);
    14939  return VK_ERROR_VALIDATION_FAILED_EXT;
    14940  }
    14941 }
    14942 
    14943 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
    14944 {
    14945  // Initialize.
    14946  InitStatInfo(pStats->total);
    14947  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
    14948  InitStatInfo(pStats->memoryType[i]);
    14949  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    14950  InitStatInfo(pStats->memoryHeap[i]);
    14951 
    14952  // Process default pools.
    14953  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    14954  {
    14955  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    14956  VMA_ASSERT(pBlockVector);
    14957  pBlockVector->AddStats(pStats);
    14958  }
    14959 
    14960  // Process custom pools.
    14961  {
    14962  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
    14963  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    14964  {
    14965  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
    14966  }
    14967  }
    14968 
    14969  // Process dedicated allocations.
    14970  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    14971  {
    14972  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    14973  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    14974  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
    14975  VMA_ASSERT(pDedicatedAllocVector);
    14976  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
    14977  {
    14978  VmaStatInfo allocationStatInfo;
    14979  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
    14980  VmaAddStatInfo(pStats->total, allocationStatInfo);
    14981  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    14982  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    14983  }
    14984  }
    14985 
    14986  // Postprocess.
    14987  VmaPostprocessCalcStatInfo(pStats->total);
    14988  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
    14989  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
    14990  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
    14991  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
    14992 }
    14993 
    14994 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
    14995 
    14996 VkResult VmaAllocator_T::DefragmentationBegin(
    14997  const VmaDefragmentationInfo2& info,
    14998  VmaDefragmentationStats* pStats,
    14999  VmaDefragmentationContext* pContext)
    15000 {
    15001  if(info.pAllocationsChanged != VMA_NULL)
    15002  {
    15003  memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32));
    15004  }
    15005 
    15006  *pContext = vma_new(this, VmaDefragmentationContext_T)(
    15007  this, m_CurrentFrameIndex.load(), info.flags, pStats);
    15008 
    15009  (*pContext)->AddPools(info.poolCount, info.pPools);
    15010  (*pContext)->AddAllocations(
    15011  info.allocationCount, info.pAllocations, info.pAllocationsChanged);
    15012 
    15013  VkResult res = (*pContext)->Defragment(
    15014  info.maxCpuBytesToMove, info.maxCpuAllocationsToMove,
    15015  info.maxGpuBytesToMove, info.maxGpuAllocationsToMove,
    15016  info.commandBuffer, pStats);
    15017 
    15018  if(res != VK_NOT_READY)
    15019  {
    15020  vma_delete(this, *pContext);
    15021  *pContext = VMA_NULL;
    15022  }
    15023 
    15024  return res;
    15025 }
    15026 
    15027 VkResult VmaAllocator_T::DefragmentationEnd(
    15028  VmaDefragmentationContext context)
    15029 {
    15030  vma_delete(this, context);
    15031  return VK_SUCCESS;
    15032 }
    15033 
    15034 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
    15035 {
    15036  if(hAllocation->CanBecomeLost())
    15037  {
    15038  /*
    15039  Warning: This is a carefully designed algorithm.
    15040  Do not modify unless you really know what you're doing :)
    15041  */
    15042  const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
    15043  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
    15044  for(;;)
    15045  {
    15046  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
    15047  {
    15048  pAllocationInfo->memoryType = UINT32_MAX;
    15049  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
    15050  pAllocationInfo->offset = 0;
    15051  pAllocationInfo->size = hAllocation->GetSize();
    15052  pAllocationInfo->pMappedData = VMA_NULL;
    15053  pAllocationInfo->pUserData = hAllocation->GetUserData();
    15054  return;
    15055  }
    15056  else if(localLastUseFrameIndex == localCurrFrameIndex)
    15057  {
    15058  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
    15059  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
    15060  pAllocationInfo->offset = hAllocation->GetOffset();
    15061  pAllocationInfo->size = hAllocation->GetSize();
    15062  pAllocationInfo->pMappedData = VMA_NULL;
    15063  pAllocationInfo->pUserData = hAllocation->GetUserData();
    15064  return;
    15065  }
    15066  else // Last use time earlier than current time.
    15067  {
    15068  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
    15069  {
    15070  localLastUseFrameIndex = localCurrFrameIndex;
    15071  }
    15072  }
    15073  }
    15074  }
    15075  else
    15076  {
    15077 #if VMA_STATS_STRING_ENABLED
    15078  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
    15079  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
    15080  for(;;)
    15081  {
    15082  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
    15083  if(localLastUseFrameIndex == localCurrFrameIndex)
    15084  {
    15085  break;
    15086  }
    15087  else // Last use time earlier than current time.
    15088  {
    15089  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
    15090  {
    15091  localLastUseFrameIndex = localCurrFrameIndex;
    15092  }
    15093  }
    15094  }
    15095 #endif
    15096 
    15097  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
    15098  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
    15099  pAllocationInfo->offset = hAllocation->GetOffset();
    15100  pAllocationInfo->size = hAllocation->GetSize();
    15101  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
    15102  pAllocationInfo->pUserData = hAllocation->GetUserData();
    15103  }
    15104 }
    15105 
    15106 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
    15107 {
    15108  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
    15109  if(hAllocation->CanBecomeLost())
    15110  {
    15111  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
    15112  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
    15113  for(;;)
    15114  {
    15115  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
    15116  {
    15117  return false;
    15118  }
    15119  else if(localLastUseFrameIndex == localCurrFrameIndex)
    15120  {
    15121  return true;
    15122  }
    15123  else // Last use time earlier than current time.
    15124  {
    15125  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
    15126  {
    15127  localLastUseFrameIndex = localCurrFrameIndex;
    15128  }
    15129  }
    15130  }
    15131  }
    15132  else
    15133  {
    15134 #if VMA_STATS_STRING_ENABLED
    15135  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
    15136  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
    15137  for(;;)
    15138  {
    15139  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
    15140  if(localLastUseFrameIndex == localCurrFrameIndex)
    15141  {
    15142  break;
    15143  }
    15144  else // Last use time earlier than current time.
    15145  {
    15146  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
    15147  {
    15148  localLastUseFrameIndex = localCurrFrameIndex;
    15149  }
    15150  }
    15151  }
    15152 #endif
    15153 
    15154  return true;
    15155  }
    15156 }
    15157 
    15158 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
    15159 {
    15160  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
    15161 
    15162  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
    15163 
    15164  if(newCreateInfo.maxBlockCount == 0)
    15165  {
    15166  newCreateInfo.maxBlockCount = SIZE_MAX;
    15167  }
    15168  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
    15169  {
    15170  return VK_ERROR_INITIALIZATION_FAILED;
    15171  }
    15172 
    15173  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
    15174 
    15175  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
    15176 
    15177  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
    15178  if(res != VK_SUCCESS)
    15179  {
    15180  vma_delete(this, *pPool);
    15181  *pPool = VMA_NULL;
    15182  return res;
    15183  }
    15184 
    15185  // Add to m_Pools.
    15186  {
    15187  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
    15188  (*pPool)->SetId(m_NextPoolId++);
    15189  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
    15190  }
    15191 
    15192  return VK_SUCCESS;
    15193 }
    15194 
    15195 void VmaAllocator_T::DestroyPool(VmaPool pool)
    15196 {
    15197  // Remove from m_Pools.
    15198  {
    15199  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
    15200  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
    15201  VMA_ASSERT(success && "Pool not found in Allocator.");
    15202  }
    15203 
    15204  vma_delete(this, pool);
    15205 }
    15206 
    15207 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
    15208 {
    15209  pool->m_BlockVector.GetPoolStats(pPoolStats);
    15210 }
    15211 
    15212 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
    15213 {
    15214  m_CurrentFrameIndex.store(frameIndex);
    15215 }
    15216 
    15217 void VmaAllocator_T::MakePoolAllocationsLost(
    15218  VmaPool hPool,
    15219  size_t* pLostAllocationCount)
    15220 {
    15221  hPool->m_BlockVector.MakePoolAllocationsLost(
    15222  m_CurrentFrameIndex.load(),
    15223  pLostAllocationCount);
    15224 }
    15225 
    15226 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
    15227 {
    15228  return hPool->m_BlockVector.CheckCorruption();
    15229 }
    15230 
    15231 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
    15232 {
    15233  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
    15234 
    15235  // Process default pools.
    15236  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    15237  {
    15238  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
    15239  {
    15240  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    15241  VMA_ASSERT(pBlockVector);
    15242  VkResult localRes = pBlockVector->CheckCorruption();
    15243  switch(localRes)
    15244  {
    15245  case VK_ERROR_FEATURE_NOT_PRESENT:
    15246  break;
    15247  case VK_SUCCESS:
    15248  finalRes = VK_SUCCESS;
    15249  break;
    15250  default:
    15251  return localRes;
    15252  }
    15253  }
    15254  }
    15255 
    15256  // Process custom pools.
    15257  {
    15258  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
    15259  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    15260  {
    15261  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
    15262  {
    15263  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
    15264  switch(localRes)
    15265  {
    15266  case VK_ERROR_FEATURE_NOT_PRESENT:
    15267  break;
    15268  case VK_SUCCESS:
    15269  finalRes = VK_SUCCESS;
    15270  break;
    15271  default:
    15272  return localRes;
    15273  }
    15274  }
    15275  }
    15276  }
    15277 
    15278  return finalRes;
    15279 }
    15280 
    15281 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
    15282 {
    15283  *pAllocation = m_AllocationObjectAllocator.Allocate();
    15284  (*pAllocation)->Ctor(VMA_FRAME_INDEX_LOST, false);
    15285  (*pAllocation)->InitLost();
    15286 }
    15287 
    15288 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
    15289 {
    15290  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
    15291 
    15292  VkResult res;
    15293  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    15294  {
    15295  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    15296  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
    15297  {
    15298  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    15299  if(res == VK_SUCCESS)
    15300  {
    15301  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
    15302  }
    15303  }
    15304  else
    15305  {
    15306  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
    15307  }
    15308  }
    15309  else
    15310  {
    15311  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    15312  }
    15313 
    15314  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
    15315  {
    15316  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
    15317  }
    15318 
    15319  return res;
    15320 }
    15321 
    15322 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
    15323 {
    15324  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
    15325  {
    15326  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
    15327  }
    15328 
    15329  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
    15330 
    15331  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
    15332  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    15333  {
    15334  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    15335  m_HeapSizeLimit[heapIndex] += size;
    15336  }
    15337 }
    15338 
    15339 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
    15340 {
    15341  if(hAllocation->CanBecomeLost())
    15342  {
    15343  return VK_ERROR_MEMORY_MAP_FAILED;
    15344  }
    15345 
    15346  switch(hAllocation->GetType())
    15347  {
    15348  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    15349  {
    15350  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    15351  char *pBytes = VMA_NULL;
    15352  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
    15353  if(res == VK_SUCCESS)
    15354  {
    15355  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
    15356  hAllocation->BlockAllocMap();
    15357  }
    15358  return res;
    15359  }
    15360  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    15361  return hAllocation->DedicatedAllocMap(this, ppData);
    15362  default:
    15363  VMA_ASSERT(0);
    15364  return VK_ERROR_MEMORY_MAP_FAILED;
    15365  }
    15366 }
    15367 
    15368 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
    15369 {
    15370  switch(hAllocation->GetType())
    15371  {
    15372  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    15373  {
    15374  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    15375  hAllocation->BlockAllocUnmap();
    15376  pBlock->Unmap(this, 1);
    15377  }
    15378  break;
    15379  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    15380  hAllocation->DedicatedAllocUnmap(this);
    15381  break;
    15382  default:
    15383  VMA_ASSERT(0);
    15384  }
    15385 }
    15386 
    15387 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
    15388 {
    15389  VkResult res = VK_SUCCESS;
    15390  switch(hAllocation->GetType())
    15391  {
    15392  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    15393  res = GetVulkanFunctions().vkBindBufferMemory(
    15394  m_hDevice,
    15395  hBuffer,
    15396  hAllocation->GetMemory(),
    15397  0); //memoryOffset
    15398  break;
    15399  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    15400  {
    15401  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    15402  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
    15403  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
    15404  break;
    15405  }
    15406  default:
    15407  VMA_ASSERT(0);
    15408  }
    15409  return res;
    15410 }
    15411 
    15412 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
    15413 {
    15414  VkResult res = VK_SUCCESS;
    15415  switch(hAllocation->GetType())
    15416  {
    15417  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    15418  res = GetVulkanFunctions().vkBindImageMemory(
    15419  m_hDevice,
    15420  hImage,
    15421  hAllocation->GetMemory(),
    15422  0); //memoryOffset
    15423  break;
    15424  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    15425  {
    15426  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    15427  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
    15428  res = pBlock->BindImageMemory(this, hAllocation, hImage);
    15429  break;
    15430  }
    15431  default:
    15432  VMA_ASSERT(0);
    15433  }
    15434  return res;
    15435 }
    15436 
    15437 void VmaAllocator_T::FlushOrInvalidateAllocation(
    15438  VmaAllocation hAllocation,
    15439  VkDeviceSize offset, VkDeviceSize size,
    15440  VMA_CACHE_OPERATION op)
    15441 {
    15442  const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
    15443  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
    15444  {
    15445  const VkDeviceSize allocationSize = hAllocation->GetSize();
    15446  VMA_ASSERT(offset <= allocationSize);
    15447 
    15448  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
    15449 
    15450  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
    15451  memRange.memory = hAllocation->GetMemory();
    15452 
    15453  switch(hAllocation->GetType())
    15454  {
    15455  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    15456  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
    15457  if(size == VK_WHOLE_SIZE)
    15458  {
    15459  memRange.size = allocationSize - memRange.offset;
    15460  }
    15461  else
    15462  {
    15463  VMA_ASSERT(offset + size <= allocationSize);
    15464  memRange.size = VMA_MIN(
    15465  VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
    15466  allocationSize - memRange.offset);
    15467  }
    15468  break;
    15469 
    15470  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    15471  {
    15472  // 1. Still within this allocation.
    15473  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
    15474  if(size == VK_WHOLE_SIZE)
    15475  {
    15476  size = allocationSize - offset;
    15477  }
    15478  else
    15479  {
    15480  VMA_ASSERT(offset + size <= allocationSize);
    15481  }
    15482  memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);
    15483 
    15484  // 2. Adjust to whole block.
    15485  const VkDeviceSize allocationOffset = hAllocation->GetOffset();
    15486  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
    15487  const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
    15488  memRange.offset += allocationOffset;
    15489  memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);
    15490 
    15491  break;
    15492  }
    15493 
    15494  default:
    15495  VMA_ASSERT(0);
    15496  }
    15497 
    15498  switch(op)
    15499  {
    15500  case VMA_CACHE_FLUSH:
    15501  (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
    15502  break;
    15503  case VMA_CACHE_INVALIDATE:
    15504  (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
    15505  break;
    15506  default:
    15507  VMA_ASSERT(0);
    15508  }
    15509  }
    15510  // else: Just ignore this call.
    15511 }
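// [Editor's note] Worked example of the range rounding above (illustrative, not
// part of vk_mem_alloc.h): with nonCoherentAtomSize = 64, offset = 100 and
// size = 200 on a dedicated allocation of 4096 bytes,
// memRange.offset = VmaAlignDown(100, 64) = 64 and
// memRange.size = VmaAlignUp(200 + (100 - 64), 64) = VmaAlignUp(236, 64) = 256,
// clamped to allocationSize - memRange.offset = 4032, so bytes [64, 320) are
// flushed or invalidated.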
    15512 
    15513 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
    15514 {
    15515  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
    15516 
    15517  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    15518  {
    15519  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    15520  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
    15521  VMA_ASSERT(pDedicatedAllocations);
    15522  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
    15523  VMA_ASSERT(success);
    15524  }
    15525 
    15526  VkDeviceMemory hMemory = allocation->GetMemory();
    15527 
    15528  /*
    15529  There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
    15530  before vkFreeMemory.
    15531 
    15532  if(allocation->GetMappedData() != VMA_NULL)
    15533  {
    15534  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    15535  }
    15536  */
    15537 
    15538  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
    15539 
    15540  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
    15541 }
    15542 
    15543 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
    15544 {
    15545  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
    15546  !hAllocation->CanBecomeLost() &&
    15547  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    15548  {
    15549  void* pData = VMA_NULL;
    15550  VkResult res = Map(hAllocation, &pData);
    15551  if(res == VK_SUCCESS)
    15552  {
    15553  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
    15554  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
    15555  Unmap(hAllocation);
    15556  }
    15557  else
    15558  {
    15559  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
    15560  }
    15561  }
    15562 }
    15563 
    15564 #if VMA_STATS_STRING_ENABLED
    15565 
    15566 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
    15567 {
    15568  bool dedicatedAllocationsStarted = false;
    15569  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    15570  {
    15571  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    15572  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
    15573  VMA_ASSERT(pDedicatedAllocVector);
    15574  if(pDedicatedAllocVector->empty() == false)
    15575  {
    15576  if(dedicatedAllocationsStarted == false)
    15577  {
    15578  dedicatedAllocationsStarted = true;
    15579  json.WriteString("DedicatedAllocations");
    15580  json.BeginObject();
    15581  }
    15582 
    15583  json.BeginString("Type ");
    15584  json.ContinueString(memTypeIndex);
    15585  json.EndString();
    15586 
    15587  json.BeginArray();
    15588 
    15589  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
    15590  {
    15591  json.BeginObject(true);
    15592  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
    15593  hAlloc->PrintParameters(json);
    15594  json.EndObject();
    15595  }
    15596 
    15597  json.EndArray();
    15598  }
    15599  }
    15600  if(dedicatedAllocationsStarted)
    15601  {
    15602  json.EndObject();
    15603  }
    15604 
    15605  {
    15606  bool allocationsStarted = false;
    15607  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    15608  {
    15609  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
    15610  {
    15611  if(allocationsStarted == false)
    15612  {
    15613  allocationsStarted = true;
    15614  json.WriteString("DefaultPools");
    15615  json.BeginObject();
    15616  }
    15617 
    15618  json.BeginString("Type ");
    15619  json.ContinueString(memTypeIndex);
    15620  json.EndString();
    15621 
    15622  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
    15623  }
    15624  }
    15625  if(allocationsStarted)
    15626  {
    15627  json.EndObject();
    15628  }
    15629  }
    15630 
    15631  // Custom pools
    15632  {
    15633  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
    15634  const size_t poolCount = m_Pools.size();
    15635  if(poolCount > 0)
    15636  {
    15637  json.WriteString("Pools");
    15638  json.BeginObject();
    15639  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
    15640  {
    15641  json.BeginString();
    15642  json.ContinueString(m_Pools[poolIndex]->GetId());
    15643  json.EndString();
    15644 
    15645  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
    15646  }
    15647  json.EndObject();
    15648  }
    15649  }
    15650 }
    15651 
    15652 #endif // #if VMA_STATS_STRING_ENABLED
    15653 
    15654 ////////////////////////////////////////////////////////////////////////////////
    15655 // Public interface
    15656 
    15657 VkResult vmaCreateAllocator(
    15658  const VmaAllocatorCreateInfo* pCreateInfo,
    15659  VmaAllocator* pAllocator)
    15660 {
    15661  VMA_ASSERT(pCreateInfo && pAllocator);
    15662  VMA_DEBUG_LOG("vmaCreateAllocator");
    15663  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
    15664  return (*pAllocator)->Init(pCreateInfo);
    15665 }
    15666 
    15667 void vmaDestroyAllocator(
    15668  VmaAllocator allocator)
    15669 {
    15670  if(allocator != VK_NULL_HANDLE)
    15671  {
    15672  VMA_DEBUG_LOG("vmaDestroyAllocator");
    15673  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
    15674  vma_delete(&allocationCallbacks, allocator);
    15675  }
    15676 }
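// [Editor's note] A minimal usage sketch for vmaCreateAllocator/vmaDestroyAllocator
// (illustrative, not part of vk_mem_alloc.h; `physicalDevice` and `device` are
// assumed to be valid Vulkan handles created by the application):
//
//     VmaAllocatorCreateInfo allocatorInfo = {};
//     allocatorInfo.physicalDevice = physicalDevice;
//     allocatorInfo.device = device;
//     VmaAllocator vmaAllocator = VK_NULL_HANDLE;
//     if(vmaCreateAllocator(&allocatorInfo, &vmaAllocator) == VK_SUCCESS)
//     {
//         // ... create pools, buffers, images ...
//         vmaDestroyAllocator(vmaAllocator);
//     }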
    15677 
    15678 void vmaGetPhysicalDeviceProperties(
    15679  VmaAllocator allocator,
    15680  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
    15681 {
    15682  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
    15683  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
    15684 }
    15685 
    15686 void vmaGetMemoryProperties(
    15687  VmaAllocator allocator,
    15688  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
    15689 {
    15690  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
    15691  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
    15692 }
    15693 
    15694 void vmaGetMemoryTypeProperties(
    15695  VmaAllocator allocator,
    15696  uint32_t memoryTypeIndex,
    15697  VkMemoryPropertyFlags* pFlags)
    15698 {
    15699  VMA_ASSERT(allocator && pFlags);
    15700  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
    15701  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
    15702 }
    15703 
    15704 void vmaSetCurrentFrameIndex(
    15705  VmaAllocator allocator,
    15706  uint32_t frameIndex)
    15707 {
    15708  VMA_ASSERT(allocator);
    15709  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
    15710 
    15711  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    15712 
    15713  allocator->SetCurrentFrameIndex(frameIndex);
    15714 }
    15715 
    15716 void vmaCalculateStats(
    15717  VmaAllocator allocator,
    15718  VmaStats* pStats)
    15719 {
    15720  VMA_ASSERT(allocator && pStats);
    15721  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    15722  allocator->CalculateStats(pStats);
    15723 }
    15724 
    15725 #if VMA_STATS_STRING_ENABLED
    15726 
    15727 void vmaBuildStatsString(
    15728  VmaAllocator allocator,
    15729  char** ppStatsString,
    15730  VkBool32 detailedMap)
    15731 {
    15732  VMA_ASSERT(allocator && ppStatsString);
    15733  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    15734 
    15735  VmaStringBuilder sb(allocator);
    15736  {
    15737  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
    15738  json.BeginObject();
    15739 
    15740  VmaStats stats;
    15741  allocator->CalculateStats(&stats);
    15742 
    15743  json.WriteString("Total");
    15744  VmaPrintStatInfo(json, stats.total);
    15745 
    15746  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
    15747  {
    15748  json.BeginString("Heap ");
    15749  json.ContinueString(heapIndex);
    15750  json.EndString();
    15751  json.BeginObject();
    15752 
    15753  json.WriteString("Size");
    15754  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
    15755 
    15756  json.WriteString("Flags");
    15757  json.BeginArray(true);
    15758  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
    15759  {
    15760  json.WriteString("DEVICE_LOCAL");
    15761  }
    15762  json.EndArray();
    15763 
    15764  if(stats.memoryHeap[heapIndex].blockCount > 0)
    15765  {
    15766  json.WriteString("Stats");
    15767  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
    15768  }
    15769 
    15770  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
    15771  {
    15772  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
    15773  {
    15774  json.BeginString("Type ");
    15775  json.ContinueString(typeIndex);
    15776  json.EndString();
    15777 
    15778  json.BeginObject();
    15779 
    15780  json.WriteString("Flags");
    15781  json.BeginArray(true);
    15782  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
    15783  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
    15784  {
    15785  json.WriteString("DEVICE_LOCAL");
    15786  }
    15787  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    15788  {
    15789  json.WriteString("HOST_VISIBLE");
    15790  }
    15791  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
    15792  {
    15793  json.WriteString("HOST_COHERENT");
    15794  }
    15795  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
    15796  {
    15797  json.WriteString("HOST_CACHED");
    15798  }
    15799  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
    15800  {
    15801  json.WriteString("LAZILY_ALLOCATED");
    15802  }
    15803  json.EndArray();
    15804 
    15805  if(stats.memoryType[typeIndex].blockCount > 0)
    15806  {
    15807  json.WriteString("Stats");
    15808  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
    15809  }
    15810 
    15811  json.EndObject();
    15812  }
    15813  }
    15814 
    15815  json.EndObject();
    15816  }
    15817  if(detailedMap == VK_TRUE)
    15818  {
    15819  allocator->PrintDetailedMap(json);
    15820  }
    15821 
    15822  json.EndObject();
    15823  }
    15824 
    15825  const size_t len = sb.GetLength();
    15826  char* const pChars = vma_new_array(allocator, char, len + 1);
    15827  if(len > 0)
    15828  {
    15829  memcpy(pChars, sb.GetData(), len);
    15830  }
    15831  pChars[len] = '\0';
    15832  *ppStatsString = pChars;
    15833 }
    15834 
    15835 void vmaFreeStatsString(
    15836  VmaAllocator allocator,
    15837  char* pStatsString)
    15838 {
    15839  if(pStatsString != VMA_NULL)
    15840  {
    15841  VMA_ASSERT(allocator);
    15842  size_t len = strlen(pStatsString);
    15843  vma_delete_array(allocator, pStatsString, len + 1);
    15844  }
    15845 }
    15846 
    15847 #endif // #if VMA_STATS_STRING_ENABLED
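// [Editor's note] Sketch of the intended pairing of vmaBuildStatsString and
// vmaFreeStatsString (illustrative; requires VMA_STATS_STRING_ENABLED):
//
//     char* statsString = nullptr;
//     vmaBuildStatsString(vmaAllocator, &statsString, VK_TRUE /*detailedMap*/);
//     // statsString now holds a null-terminated JSON document; log or save it.
//     vmaFreeStatsString(vmaAllocator, statsString);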
    15848 
    15849 /*
    15850 This function is not protected by any mutex because it just reads immutable data.
    15851 */
    15852 VkResult vmaFindMemoryTypeIndex(
    15853  VmaAllocator allocator,
    15854  uint32_t memoryTypeBits,
    15855  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    15856  uint32_t* pMemoryTypeIndex)
    15857 {
    15858  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    15859  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    15860  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    15861 
    15862  if(pAllocationCreateInfo->memoryTypeBits != 0)
    15863  {
    15864  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
    15865  }
    15866 
    15867  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
    15868  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
    15869 
    15870  const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    15871  if(mapped)
    15872  {
    15873  preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    15874  }
    15875 
    15876  // Convert usage to requiredFlags and preferredFlags.
    15877  switch(pAllocationCreateInfo->usage)
    15878  {
    15879  case VMA_MEMORY_USAGE_UNKNOWN:
    15880  break;
    15881  case VMA_MEMORY_USAGE_GPU_ONLY:
    15882  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    15883  {
    15884  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    15885  }
    15886  break;
    15887  case VMA_MEMORY_USAGE_CPU_ONLY:
    15888  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    15889  break;
    15890  case VMA_MEMORY_USAGE_CPU_TO_GPU:
    15891  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    15892  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    15893  {
    15894  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    15895  }
    15896  break;
    15897  case VMA_MEMORY_USAGE_GPU_TO_CPU:
    15898  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    15899  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
    15900  break;
    15901  default:
    15902  break;
    15903  }
    15904 
    15905  *pMemoryTypeIndex = UINT32_MAX;
    15906  uint32_t minCost = UINT32_MAX;
    15907  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
    15908  memTypeIndex < allocator->GetMemoryTypeCount();
    15909  ++memTypeIndex, memTypeBit <<= 1)
    15910  {
    15911  // This memory type is acceptable according to memoryTypeBits bitmask.
    15912  if((memTypeBit & memoryTypeBits) != 0)
    15913  {
    15914  const VkMemoryPropertyFlags currFlags =
    15915  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
    15916  // This memory type contains requiredFlags.
    15917  if((requiredFlags & ~currFlags) == 0)
    15918  {
    15919  // Calculate cost as number of bits from preferredFlags not present in this memory type.
    15920  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
    15921  // Remember memory type with lowest cost.
    15922  if(currCost < minCost)
    15923  {
    15924  *pMemoryTypeIndex = memTypeIndex;
    15925  if(currCost == 0)
    15926  {
    15927  return VK_SUCCESS;
    15928  }
    15929  minCost = currCost;
    15930  }
    15931  }
    15932  }
    15933  }
    15934  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
    15935 }
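// [Editor's note] Usage sketch for vmaFindMemoryTypeIndex (illustrative):
// choosing a host-visible type for a staging buffer. In real code,
// memoryTypeBits would come from vkGetBufferMemoryRequirements; UINT32_MAX
// here means "consider every type".
//
//     VmaAllocationCreateInfo stagingCreateInfo = {};
//     stagingCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
//     uint32_t memTypeIndex = UINT32_MAX;
//     VkResult res = vmaFindMemoryTypeIndex(
//         vmaAllocator, UINT32_MAX, &stagingCreateInfo, &memTypeIndex);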
    15936 
    15937 VkResult vmaFindMemoryTypeIndexForBufferInfo(
    15938  VmaAllocator allocator,
    15939  const VkBufferCreateInfo* pBufferCreateInfo,
    15940  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    15941  uint32_t* pMemoryTypeIndex)
    15942 {
    15943  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    15944  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
    15945  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    15946  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    15947 
    15948  const VkDevice hDev = allocator->m_hDevice;
    15949  VkBuffer hBuffer = VK_NULL_HANDLE;
    15950  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
    15951  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
    15952  if(res == VK_SUCCESS)
    15953  {
    15954  VkMemoryRequirements memReq = {};
    15955  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
    15956  hDev, hBuffer, &memReq);
    15957 
    15958  res = vmaFindMemoryTypeIndex(
    15959  allocator,
    15960  memReq.memoryTypeBits,
    15961  pAllocationCreateInfo,
    15962  pMemoryTypeIndex);
    15963 
    15964  allocator->GetVulkanFunctions().vkDestroyBuffer(
    15965  hDev, hBuffer, allocator->GetAllocationCallbacks());
    15966  }
    15967  return res;
    15968 }
    15969 
    15970 VkResult vmaFindMemoryTypeIndexForImageInfo(
    15971  VmaAllocator allocator,
    15972  const VkImageCreateInfo* pImageCreateInfo,
    15973  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    15974  uint32_t* pMemoryTypeIndex)
    15975 {
    15976  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    15977  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
    15978  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    15979  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    15980 
    15981  const VkDevice hDev = allocator->m_hDevice;
    15982  VkImage hImage = VK_NULL_HANDLE;
    15983  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
    15984  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
    15985  if(res == VK_SUCCESS)
    15986  {
    15987  VkMemoryRequirements memReq = {};
    15988  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
    15989  hDev, hImage, &memReq);
    15990 
    15991  res = vmaFindMemoryTypeIndex(
    15992  allocator,
    15993  memReq.memoryTypeBits,
    15994  pAllocationCreateInfo,
    15995  pMemoryTypeIndex);
    15996 
    15997  allocator->GetVulkanFunctions().vkDestroyImage(
    15998  hDev, hImage, allocator->GetAllocationCallbacks());
    15999  }
    16000  return res;
    16001 }
    16002 
    16003 VkResult vmaCreatePool(
    16004  VmaAllocator allocator,
    16005  const VmaPoolCreateInfo* pCreateInfo,
    16006  VmaPool* pPool)
    16007 {
    16008  VMA_ASSERT(allocator && pCreateInfo && pPool);
    16009 
    16010  VMA_DEBUG_LOG("vmaCreatePool");
    16011 
    16012  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16013 
    16014  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
    16015 
    16016 #if VMA_RECORDING_ENABLED
    16017  if(allocator->GetRecorder() != VMA_NULL)
    16018  {
    16019  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
    16020  }
    16021 #endif
    16022 
    16023  return res;
    16024 }
    16025 
    16026 void vmaDestroyPool(
    16027  VmaAllocator allocator,
    16028  VmaPool pool)
    16029 {
    16030  VMA_ASSERT(allocator);
    16031 
    16032  if(pool == VK_NULL_HANDLE)
    16033  {
    16034  return;
    16035  }
    16036 
    16037  VMA_DEBUG_LOG("vmaDestroyPool");
    16038 
    16039  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16040 
    16041 #if VMA_RECORDING_ENABLED
    16042  if(allocator->GetRecorder() != VMA_NULL)
    16043  {
    16044  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
    16045  }
    16046 #endif
    16047 
    16048  allocator->DestroyPool(pool);
    16049 }
    16050 
    16051 void vmaGetPoolStats(
    16052  VmaAllocator allocator,
    16053  VmaPool pool,
    16054  VmaPoolStats* pPoolStats)
    16055 {
    16056  VMA_ASSERT(allocator && pool && pPoolStats);
    16057 
    16058  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16059 
    16060  allocator->GetPoolStats(pool, pPoolStats);
    16061 }
    16062 
    16063 void vmaMakePoolAllocationsLost(
    16064  VmaAllocator allocator,
    16065  VmaPool pool,
    16066  size_t* pLostAllocationCount)
    16067 {
    16068  VMA_ASSERT(allocator && pool);
    16069 
    16070  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16071 
    16072 #if VMA_RECORDING_ENABLED
    16073  if(allocator->GetRecorder() != VMA_NULL)
    16074  {
    16075  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
    16076  }
    16077 #endif
    16078 
    16079  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
    16080 }
    16081 
    16082 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
    16083 {
    16084  VMA_ASSERT(allocator && pool);
    16085 
    16086  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16087 
    16088  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
    16089 
    16090  return allocator->CheckPoolCorruption(pool);
    16091 }
    16092 
    16093 VkResult vmaAllocateMemory(
    16094  VmaAllocator allocator,
    16095  const VkMemoryRequirements* pVkMemoryRequirements,
    16096  const VmaAllocationCreateInfo* pCreateInfo,
    16097  VmaAllocation* pAllocation,
    16098  VmaAllocationInfo* pAllocationInfo)
    16099 {
    16100  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
    16101 
    16102  VMA_DEBUG_LOG("vmaAllocateMemory");
    16103 
    16104  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16105 
    16106  VkResult result = allocator->AllocateMemory(
    16107  *pVkMemoryRequirements,
    16108  false, // requiresDedicatedAllocation
    16109  false, // prefersDedicatedAllocation
    16110  VK_NULL_HANDLE, // dedicatedBuffer
    16111  VK_NULL_HANDLE, // dedicatedImage
    16112  *pCreateInfo,
    16113  VMA_SUBALLOCATION_TYPE_UNKNOWN,
    16114  1, // allocationCount
    16115  pAllocation);
    16116 
    16117 #if VMA_RECORDING_ENABLED
    16118  if(allocator->GetRecorder() != VMA_NULL)
    16119  {
    16120  allocator->GetRecorder()->RecordAllocateMemory(
    16121  allocator->GetCurrentFrameIndex(),
    16122  *pVkMemoryRequirements,
    16123  *pCreateInfo,
    16124  *pAllocation);
    16125  }
    16126 #endif
    16127 
    16128  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
    16129  {
    16130  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    16131  }
    16132 
    16133  return result;
    16134 }
    16135 
    16136 VkResult vmaAllocateMemoryPages(
    16137  VmaAllocator allocator,
    16138  const VkMemoryRequirements* pVkMemoryRequirements,
    16139  const VmaAllocationCreateInfo* pCreateInfo,
    16140  size_t allocationCount,
    16141  VmaAllocation* pAllocations,
    16142  VmaAllocationInfo* pAllocationInfo)
    16143 {
    16144  if(allocationCount == 0)
    16145  {
    16146  return VK_SUCCESS;
    16147  }
    16148 
    16149  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);
    16150 
    16151  VMA_DEBUG_LOG("vmaAllocateMemoryPages");
    16152 
    16153  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16154 
    16155  VkResult result = allocator->AllocateMemory(
    16156  *pVkMemoryRequirements,
    16157  false, // requiresDedicatedAllocation
    16158  false, // prefersDedicatedAllocation
    16159  VK_NULL_HANDLE, // dedicatedBuffer
    16160  VK_NULL_HANDLE, // dedicatedImage
    16161  *pCreateInfo,
    16162  VMA_SUBALLOCATION_TYPE_UNKNOWN,
    16163  allocationCount,
    16164  pAllocations);
    16165 
    16166 #if VMA_RECORDING_ENABLED
    16167  if(allocator->GetRecorder() != VMA_NULL)
    16168  {
    16169  allocator->GetRecorder()->RecordAllocateMemoryPages(
    16170  allocator->GetCurrentFrameIndex(),
    16171  *pVkMemoryRequirements,
    16172  *pCreateInfo,
    16173  (uint64_t)allocationCount,
    16174  pAllocations);
    16175  }
    16176 #endif
    16177 
    16178  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
    16179  {
    16180  for(size_t i = 0; i < allocationCount; ++i)
    16181  {
    16182  allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
    16183  }
    16184  }
    16185 
    16186  return result;
    16187 }
    16188 
    16189 VkResult vmaAllocateMemoryForBuffer(
    16190  VmaAllocator allocator,
    16191  VkBuffer buffer,
    16192  const VmaAllocationCreateInfo* pCreateInfo,
    16193  VmaAllocation* pAllocation,
    16194  VmaAllocationInfo* pAllocationInfo)
    16195 {
    16196  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    16197 
    16198  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
    16199 
    16200  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16201 
    16202  VkMemoryRequirements vkMemReq = {};
    16203  bool requiresDedicatedAllocation = false;
    16204  bool prefersDedicatedAllocation = false;
    16205  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
    16206  requiresDedicatedAllocation,
    16207  prefersDedicatedAllocation);
    16208 
    16209  VkResult result = allocator->AllocateMemory(
    16210  vkMemReq,
    16211  requiresDedicatedAllocation,
    16212  prefersDedicatedAllocation,
    16213  buffer, // dedicatedBuffer
    16214  VK_NULL_HANDLE, // dedicatedImage
    16215  *pCreateInfo,
    16216  VMA_SUBALLOCATION_TYPE_BUFFER,
    16217  1, // allocationCount
    16218  pAllocation);
    16219 
    16220 #if VMA_RECORDING_ENABLED
    16221  if(allocator->GetRecorder() != VMA_NULL)
    16222  {
    16223  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
    16224  allocator->GetCurrentFrameIndex(),
    16225  vkMemReq,
    16226  requiresDedicatedAllocation,
    16227  prefersDedicatedAllocation,
    16228  *pCreateInfo,
    16229  *pAllocation);
    16230  }
    16231 #endif
    16232 
    16233  if(pAllocationInfo && result == VK_SUCCESS)
    16234  {
    16235  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    16236  }
    16237 
    16238  return result;
    16239 }
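// [Editor's note] Usage sketch (illustrative): vmaAllocateMemoryForBuffer only
// allocates; the caller still binds, which vmaCreateBuffer otherwise does in
// one call. `buffer` is assumed to be a valid, unbound VkBuffer.
//
//     VmaAllocationCreateInfo allocCreateInfo = {};
//     allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
//     VmaAllocation allocation = VK_NULL_HANDLE;
//     VkResult res = vmaAllocateMemoryForBuffer(
//         vmaAllocator, buffer, &allocCreateInfo, &allocation, nullptr);
//     if(res == VK_SUCCESS)
//     {
//         res = vmaBindBufferMemory(vmaAllocator, allocation, buffer);
//     }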
    16240 
    16241 VkResult vmaAllocateMemoryForImage(
    16242  VmaAllocator allocator,
    16243  VkImage image,
    16244  const VmaAllocationCreateInfo* pCreateInfo,
    16245  VmaAllocation* pAllocation,
    16246  VmaAllocationInfo* pAllocationInfo)
    16247 {
    16248  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    16249 
    16250  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
    16251 
    16252  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16253 
    16254  VkMemoryRequirements vkMemReq = {};
    16255  bool requiresDedicatedAllocation = false;
    16256  bool prefersDedicatedAllocation = false;
    16257  allocator->GetImageMemoryRequirements(image, vkMemReq,
    16258  requiresDedicatedAllocation, prefersDedicatedAllocation);
    16259 
    16260  VkResult result = allocator->AllocateMemory(
    16261  vkMemReq,
    16262  requiresDedicatedAllocation,
    16263  prefersDedicatedAllocation,
    16264  VK_NULL_HANDLE, // dedicatedBuffer
    16265  image, // dedicatedImage
    16266  *pCreateInfo,
    16267  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
    16268  1, // allocationCount
    16269  pAllocation);
    16270 
    16271 #if VMA_RECORDING_ENABLED
    16272  if(allocator->GetRecorder() != VMA_NULL)
    16273  {
    16274  allocator->GetRecorder()->RecordAllocateMemoryForImage(
    16275  allocator->GetCurrentFrameIndex(),
    16276  vkMemReq,
    16277  requiresDedicatedAllocation,
    16278  prefersDedicatedAllocation,
    16279  *pCreateInfo,
    16280  *pAllocation);
    16281  }
    16282 #endif
    16283 
    16284  if(pAllocationInfo && result == VK_SUCCESS)
    16285  {
    16286  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    16287  }
    16288 
    16289  return result;
    16290 }
    16291 
    16292 void vmaFreeMemory(
    16293  VmaAllocator allocator,
    16294  VmaAllocation allocation)
    16295 {
    16296  VMA_ASSERT(allocator);
    16297 
    16298  if(allocation == VK_NULL_HANDLE)
    16299  {
    16300  return;
    16301  }
    16302 
    16303  VMA_DEBUG_LOG("vmaFreeMemory");
    16304 
    16305  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16306 
    16307 #if VMA_RECORDING_ENABLED
    16308  if(allocator->GetRecorder() != VMA_NULL)
    16309  {
    16310  allocator->GetRecorder()->RecordFreeMemory(
    16311  allocator->GetCurrentFrameIndex(),
    16312  allocation);
    16313  }
    16314 #endif
    16315 
    16316  allocator->FreeMemory(
    16317  1, // allocationCount
    16318  &allocation);
    16319 }
    16320 
    16321 void vmaFreeMemoryPages(
    16322  VmaAllocator allocator,
    16323  size_t allocationCount,
    16324  VmaAllocation* pAllocations)
    16325 {
    16326  if(allocationCount == 0)
    16327  {
    16328  return;
    16329  }
    16330 
    16331  VMA_ASSERT(allocator);
    16332 
    16333  VMA_DEBUG_LOG("vmaFreeMemoryPages");
    16334 
    16335  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16336 
    16337 #if VMA_RECORDING_ENABLED
    16338  if(allocator->GetRecorder() != VMA_NULL)
    16339  {
    16340  allocator->GetRecorder()->RecordFreeMemoryPages(
    16341  allocator->GetCurrentFrameIndex(),
    16342  (uint64_t)allocationCount,
    16343  pAllocations);
    16344  }
    16345 #endif
    16346 
    16347  allocator->FreeMemory(allocationCount, pAllocations);
    16348 }
    16349 
    16350 VkResult vmaResizeAllocation(
    16351  VmaAllocator allocator,
    16352  VmaAllocation allocation,
    16353  VkDeviceSize newSize)
    16354 {
    16355  VMA_ASSERT(allocator && allocation);
    16356 
    16357  VMA_DEBUG_LOG("vmaResizeAllocation");
    16358 
    16359  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16360 
    16361 #if VMA_RECORDING_ENABLED
    16362  if(allocator->GetRecorder() != VMA_NULL)
    16363  {
    16364  allocator->GetRecorder()->RecordResizeAllocation(
    16365  allocator->GetCurrentFrameIndex(),
    16366  allocation,
    16367  newSize);
    16368  }
    16369 #endif
    16370 
    16371  return allocator->ResizeAllocation(allocation, newSize);
    16372 }
    16373 
    16374 void vmaGetAllocationInfo(
    16375  VmaAllocator allocator,
    16376  VmaAllocation allocation,
    16377  VmaAllocationInfo* pAllocationInfo)
    16378 {
    16379  VMA_ASSERT(allocator && allocation && pAllocationInfo);
    16380 
    16381  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16382 
    16383 #if VMA_RECORDING_ENABLED
    16384  if(allocator->GetRecorder() != VMA_NULL)
    16385  {
    16386  allocator->GetRecorder()->RecordGetAllocationInfo(
    16387  allocator->GetCurrentFrameIndex(),
    16388  allocation);
    16389  }
    16390 #endif
    16391 
    16392  allocator->GetAllocationInfo(allocation, pAllocationInfo);
    16393 }
    16394 
    16395 VkBool32 vmaTouchAllocation(
    16396  VmaAllocator allocator,
    16397  VmaAllocation allocation)
    16398 {
    16399  VMA_ASSERT(allocator && allocation);
    16400 
    16401  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16402 
    16403 #if VMA_RECORDING_ENABLED
    16404  if(allocator->GetRecorder() != VMA_NULL)
    16405  {
    16406  allocator->GetRecorder()->RecordTouchAllocation(
    16407  allocator->GetCurrentFrameIndex(),
    16408  allocation);
    16409  }
    16410 #endif
    16411 
    16412  return allocator->TouchAllocation(allocation);
    16413 }
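// [Editor's note] Per-frame pattern for allocations created with
// VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT (illustrative sketch):
//
//     vmaSetCurrentFrameIndex(vmaAllocator, frameIndex); // once per frame
//     if(vmaTouchAllocation(vmaAllocator, allocation) == VK_FALSE)
//     {
//         // The allocation is lost: free it and recreate the resource.
//     }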
    16414 
    16415 void vmaSetAllocationUserData(
    16416  VmaAllocator allocator,
    16417  VmaAllocation allocation,
    16418  void* pUserData)
    16419 {
    16420  VMA_ASSERT(allocator && allocation);
    16421 
    16422  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16423 
    16424  allocation->SetUserData(allocator, pUserData);
    16425 
    16426 #if VMA_RECORDING_ENABLED
    16427  if(allocator->GetRecorder() != VMA_NULL)
    16428  {
    16429  allocator->GetRecorder()->RecordSetAllocationUserData(
    16430  allocator->GetCurrentFrameIndex(),
    16431  allocation,
    16432  pUserData);
    16433  }
    16434 #endif
    16435 }
    16436 
    16437 void vmaCreateLostAllocation(
    16438  VmaAllocator allocator,
    16439  VmaAllocation* pAllocation)
    16440 {
    16441  VMA_ASSERT(allocator && pAllocation);
    16442 
    16443  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
    16444 
    16445  allocator->CreateLostAllocation(pAllocation);
    16446 
    16447 #if VMA_RECORDING_ENABLED
    16448  if(allocator->GetRecorder() != VMA_NULL)
    16449  {
    16450  allocator->GetRecorder()->RecordCreateLostAllocation(
    16451  allocator->GetCurrentFrameIndex(),
    16452  *pAllocation);
    16453  }
    16454 #endif
    16455 }
    16456 
    16457 VkResult vmaMapMemory(
    16458  VmaAllocator allocator,
    16459  VmaAllocation allocation,
    16460  void** ppData)
    16461 {
    16462  VMA_ASSERT(allocator && allocation && ppData);
    16463 
    16464  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16465 
    16466  VkResult res = allocator->Map(allocation, ppData);
    16467 
    16468 #if VMA_RECORDING_ENABLED
    16469  if(allocator->GetRecorder() != VMA_NULL)
    16470  {
    16471  allocator->GetRecorder()->RecordMapMemory(
    16472  allocator->GetCurrentFrameIndex(),
    16473  allocation);
    16474  }
    16475 #endif
    16476 
    16477  return res;
    16478 }
    16479 
    16480 void vmaUnmapMemory(
    16481  VmaAllocator allocator,
    16482  VmaAllocation allocation)
    16483 {
    16484  VMA_ASSERT(allocator && allocation);
    16485 
    16486  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16487 
    16488 #if VMA_RECORDING_ENABLED
    16489  if(allocator->GetRecorder() != VMA_NULL)
    16490  {
    16491  allocator->GetRecorder()->RecordUnmapMemory(
    16492  allocator->GetCurrentFrameIndex(),
    16493  allocation);
    16494  }
    16495 #endif
    16496 
    16497  allocator->Unmap(allocation);
    16498 }
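// [Editor's note] Sketch of the map/write/flush/unmap sequence the entry points
// above and below implement (illustrative; `allocation` is assumed HOST_VISIBLE
// and possibly non-coherent, hence the explicit flush):
//
//     void* mappedData = nullptr;
//     if(vmaMapMemory(vmaAllocator, allocation, &mappedData) == VK_SUCCESS)
//     {
//         memcpy(mappedData, srcData, (size_t)srcSize);
//         vmaFlushAllocation(vmaAllocator, allocation, 0, VK_WHOLE_SIZE);
//         vmaUnmapMemory(vmaAllocator, allocation);
//     }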
    16499 
    16500 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    16501 {
    16502  VMA_ASSERT(allocator && allocation);
    16503 
    16504  VMA_DEBUG_LOG("vmaFlushAllocation");
    16505 
    16506  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16507 
    16508  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
    16509 
    16510 #if VMA_RECORDING_ENABLED
    16511  if(allocator->GetRecorder() != VMA_NULL)
    16512  {
    16513  allocator->GetRecorder()->RecordFlushAllocation(
    16514  allocator->GetCurrentFrameIndex(),
    16515  allocation, offset, size);
    16516  }
    16517 #endif
    16518 }
    16519 
    16520 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    16521 {
    16522  VMA_ASSERT(allocator && allocation);
    16523 
    16524  VMA_DEBUG_LOG("vmaInvalidateAllocation");
    16525 
    16526  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16527 
    16528  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
    16529 
    16530 #if VMA_RECORDING_ENABLED
    16531  if(allocator->GetRecorder() != VMA_NULL)
    16532  {
    16533  allocator->GetRecorder()->RecordInvalidateAllocation(
    16534  allocator->GetCurrentFrameIndex(),
    16535  allocation, offset, size);
    16536  }
    16537 #endif
    16538 }
    16539 
    16540 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
    16541 {
    16542  VMA_ASSERT(allocator);
    16543 
    16544  VMA_DEBUG_LOG("vmaCheckCorruption");
    16545 
    16546  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16547 
    16548  return allocator->CheckCorruption(memoryTypeBits);
    16549 }
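// [Editor's note] Illustrative call checking every memory type; only useful
// when the library is compiled with a nonzero VMA_DEBUG_MARGIN and
// VMA_DEBUG_DETECT_CORRUPTION:
//
//     VkResult corruption = vmaCheckCorruption(vmaAllocator, UINT32_MAX);
//     // VK_ERROR_FEATURE_NOT_PRESENT means no checked type supported detection.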
    16550 
    16551 VkResult vmaDefragment(
    16552  VmaAllocator allocator,
    16553  VmaAllocation* pAllocations,
    16554  size_t allocationCount,
    16555  VkBool32* pAllocationsChanged,
    16556  const VmaDefragmentationInfo *pDefragmentationInfo,
    16557  VmaDefragmentationStats* pDefragmentationStats)
    16558 {
    16559  // Deprecated interface, reimplemented using new one.
    16560 
    16561  VmaDefragmentationInfo2 info2 = {};
    16562  info2.allocationCount = (uint32_t)allocationCount;
    16563  info2.pAllocations = pAllocations;
    16564  info2.pAllocationsChanged = pAllocationsChanged;
    16565  if(pDefragmentationInfo != VMA_NULL)
    16566  {
    16567  info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
    16568  info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove;
    16569  }
    16570  else
    16571  {
    16572  info2.maxCpuAllocationsToMove = UINT32_MAX;
    16573  info2.maxCpuBytesToMove = VK_WHOLE_SIZE;
    16574  }
    16575  // info2.flags, maxGpuAllocationsToMove, maxGpuBytesToMove, commandBuffer deliberately left zero.
    16576 
    16577  VmaDefragmentationContext ctx = VK_NULL_HANDLE;
    16578  VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx);
    16579  if(res == VK_NOT_READY)
    16580  {
    16581  res = vmaDefragmentationEnd(allocator, ctx);
    16582  }
    16583  return res;
    16584 }

VkResult vmaDefragmentationBegin(
    VmaAllocator allocator,
    const VmaDefragmentationInfo2* pInfo,
    VmaDefragmentationStats* pStats,
    VmaDefragmentationContext *pContext)
{
    VMA_ASSERT(allocator && pInfo && pContext);

    // Degenerate case: Nothing to defragment.
    if(pInfo->allocationCount == 0 && pInfo->poolCount == 0)
    {
        return VK_SUCCESS;
    }

    VMA_ASSERT(pInfo->allocationCount == 0 || pInfo->pAllocations != VMA_NULL);
    VMA_ASSERT(pInfo->poolCount == 0 || pInfo->pPools != VMA_NULL);
    VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->allocationCount, pInfo->pAllocations));
    VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->poolCount, pInfo->pPools));

    VMA_DEBUG_LOG("vmaDefragmentationBegin");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VkResult res = allocator->DefragmentationBegin(*pInfo, pStats, pContext);

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordDefragmentationBegin(
            allocator->GetCurrentFrameIndex(), *pInfo, *pContext);
    }
#endif

    return res;
}

VkResult vmaDefragmentationEnd(
    VmaAllocator allocator,
    VmaDefragmentationContext context)
{
    VMA_ASSERT(allocator);

    VMA_DEBUG_LOG("vmaDefragmentationEnd");

    if(context != VK_NULL_HANDLE)
    {
        VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
        if(allocator->GetRecorder() != VMA_NULL)
        {
            allocator->GetRecorder()->RecordDefragmentationEnd(
                allocator->GetCurrentFrameIndex(), context);
        }
#endif

        return allocator->DefragmentationEnd(context);
    }
    else
    {
        return VK_SUCCESS;
    }
}
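
// Illustrative sketch of the equivalent flow through the new Begin/End API, not
// part of the library source; `allocs` and `allocCount` are hypothetical. The
// GPU-side fields of VmaDefragmentationInfo2 (commandBuffer, maxGpuBytesToMove,
// maxGpuAllocationsToMove) stay zero here, mirroring the deprecated shim above.
static void ExampleDefragmentBeginEnd(
    VmaAllocator allocator, VmaAllocation* allocs, uint32_t allocCount)
{
    VmaDefragmentationInfo2 info = {};
    info.allocationCount = allocCount;
    info.pAllocations = allocs;
    info.maxCpuBytesToMove = VK_WHOLE_SIZE;
    info.maxCpuAllocationsToMove = UINT32_MAX;

    VmaDefragmentationContext ctx = VK_NULL_HANDLE;
    VmaDefragmentationStats stats = {};
    VkResult res = vmaDefragmentationBegin(allocator, &info, &stats, &ctx);
    if(res == VK_NOT_READY)
    {
        // CPU-side moves already completed inside Begin; End releases the context.
        res = vmaDefragmentationEnd(allocator, ctx);
    }
    (void)res;
}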

VkResult vmaBindBufferMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkBuffer buffer)
{
    VMA_ASSERT(allocator && allocation && buffer);

    VMA_DEBUG_LOG("vmaBindBufferMemory");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    return allocator->BindBufferMemory(allocation, buffer);
}

VkResult vmaBindImageMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkImage image)
{
    VMA_ASSERT(allocator && allocation && image);

    VMA_DEBUG_LOG("vmaBindImageMemory");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    return allocator->BindImageMemory(allocation, image);
}
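
// Illustrative sketch of the VMA_ALLOCATION_CREATE_DONT_BIND_BIT flag this patch
// introduces, not part of the library source: create the buffer and allocation
// without binding them, then bind explicitly via vmaBindBufferMemory().
static void ExampleDontBind(VmaAllocator allocator)
{
    VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufInfo.size = 65536;
    bufInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DONT_BIND_BIT;

    VkBuffer buf = VK_NULL_HANDLE;
    VmaAllocation alloc = VK_NULL_HANDLE;
    if(vmaCreateBuffer(allocator, &bufInfo, &allocCreateInfo, &buf, &alloc, nullptr) == VK_SUCCESS)
    {
        // Binding is now the caller's responsibility, at a time of its choosing.
        vmaBindBufferMemory(allocator, alloc, buf);
        vmaDestroyBuffer(allocator, buf, alloc);
    }
}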

VkResult vmaCreateBuffer(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);

    if(pBufferCreateInfo->size == 0)
    {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VMA_DEBUG_LOG("vmaCreateBuffer");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    *pBuffer = VK_NULL_HANDLE;
    *pAllocation = VK_NULL_HANDLE;

    // 1. Create VkBuffer.
    VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
        allocator->m_hDevice,
        pBufferCreateInfo,
        allocator->GetAllocationCallbacks(),
        pBuffer);
    if(res >= 0)
    {
        // 2. vkGetBufferMemoryRequirements.
        VkMemoryRequirements vkMemReq = {};
        bool requiresDedicatedAllocation = false;
        bool prefersDedicatedAllocation = false;
        allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
            requiresDedicatedAllocation, prefersDedicatedAllocation);

        // Make sure alignment requirements for specific buffer usages reported
        // in Physical Device Properties are included in alignment reported by memory requirements.
        if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
        {
            VMA_ASSERT(vkMemReq.alignment %
                allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
        }
        if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
        {
            VMA_ASSERT(vkMemReq.alignment %
                allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
        }
        if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
        {
            VMA_ASSERT(vkMemReq.alignment %
                allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
        }

        // 3. Allocate memory using allocator.
        res = allocator->AllocateMemory(
            vkMemReq,
            requiresDedicatedAllocation,
            prefersDedicatedAllocation,
            *pBuffer, // dedicatedBuffer
            VK_NULL_HANDLE, // dedicatedImage
            *pAllocationCreateInfo,
            VMA_SUBALLOCATION_TYPE_BUFFER,
            1, // allocationCount
            pAllocation);

#if VMA_RECORDING_ENABLED
        if(allocator->GetRecorder() != VMA_NULL)
        {
            allocator->GetRecorder()->RecordCreateBuffer(
                allocator->GetCurrentFrameIndex(),
                *pBufferCreateInfo,
                *pAllocationCreateInfo,
                *pAllocation);
        }
#endif

        if(res >= 0)
        {
            // 4. Bind buffer with memory, unless the caller opted out with
            // VMA_ALLOCATION_CREATE_DONT_BIND_BIT.
            if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
            {
                res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
            }
            if(res >= 0)
            {
                // All steps succeeded.
                #if VMA_STATS_STRING_ENABLED
                    (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
                #endif
                if(pAllocationInfo != VMA_NULL)
                {
                    allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
                }

                return VK_SUCCESS;
            }
            allocator->FreeMemory(
                1, // allocationCount
                pAllocation);
            *pAllocation = VK_NULL_HANDLE;
            (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
            *pBuffer = VK_NULL_HANDLE;
            return res;
        }
        (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
        *pBuffer = VK_NULL_HANDLE;
        return res;
    }
    return res;
}
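
// Illustrative sketch of the common single-call path of the function above, not
// part of the library source: create, allocate, and bind in one call, here with
// a persistently mapped host-visible allocation.
static void ExampleCreateUniformBuffer(VmaAllocator allocator)
{
    VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufInfo.size = sizeof(float) * 1024;
    bufInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;

    VkBuffer buf = VK_NULL_HANDLE;
    VmaAllocation alloc = VK_NULL_HANDLE;
    VmaAllocationInfo allocInfo = {};
    if(vmaCreateBuffer(allocator, &bufInfo, &allocCreateInfo, &buf, &alloc, &allocInfo) == VK_SUCCESS)
    {
        // allocInfo.pMappedData stays valid for the allocation's lifetime
        // thanks to VMA_ALLOCATION_CREATE_MAPPED_BIT.
        memset(allocInfo.pMappedData, 0, (size_t)bufInfo.size);
        vmaDestroyBuffer(allocator, buf, alloc);
    }
}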

void vmaDestroyBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    VmaAllocation allocation)
{
    VMA_ASSERT(allocator);

    if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    {
        return;
    }

    VMA_DEBUG_LOG("vmaDestroyBuffer");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordDestroyBuffer(
            allocator->GetCurrentFrameIndex(),
            allocation);
    }
#endif

    if(buffer != VK_NULL_HANDLE)
    {
        (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
    }

    if(allocation != VK_NULL_HANDLE)
    {
        allocator->FreeMemory(
            1, // allocationCount
            &allocation);
    }
}

VkResult vmaCreateImage(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkImage* pImage,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);

    if(pImageCreateInfo->extent.width == 0 ||
        pImageCreateInfo->extent.height == 0 ||
        pImageCreateInfo->extent.depth == 0 ||
        pImageCreateInfo->mipLevels == 0 ||
        pImageCreateInfo->arrayLayers == 0)
    {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VMA_DEBUG_LOG("vmaCreateImage");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    *pImage = VK_NULL_HANDLE;
    *pAllocation = VK_NULL_HANDLE;

    // 1. Create VkImage.
    VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
        allocator->m_hDevice,
        pImageCreateInfo,
        allocator->GetAllocationCallbacks(),
        pImage);
    if(res >= 0)
    {
        VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
            VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
            VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;

        // 2. Allocate memory using allocator.
        VkMemoryRequirements vkMemReq = {};
        bool requiresDedicatedAllocation = false;
        bool prefersDedicatedAllocation = false;
        allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
            requiresDedicatedAllocation, prefersDedicatedAllocation);

        res = allocator->AllocateMemory(
            vkMemReq,
            requiresDedicatedAllocation,
            prefersDedicatedAllocation,
            VK_NULL_HANDLE, // dedicatedBuffer
            *pImage, // dedicatedImage
            *pAllocationCreateInfo,
            suballocType,
            1, // allocationCount
            pAllocation);

#if VMA_RECORDING_ENABLED
        if(allocator->GetRecorder() != VMA_NULL)
        {
            allocator->GetRecorder()->RecordCreateImage(
                allocator->GetCurrentFrameIndex(),
                *pImageCreateInfo,
                *pAllocationCreateInfo,
                *pAllocation);
        }
#endif

        if(res >= 0)
        {
            // 3. Bind image with memory, unless the caller opted out with
            // VMA_ALLOCATION_CREATE_DONT_BIND_BIT.
            if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
            {
                res = allocator->BindImageMemory(*pAllocation, *pImage);
            }
            if(res >= 0)
            {
                // All steps succeeded.
                #if VMA_STATS_STRING_ENABLED
                    (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
                #endif
                if(pAllocationInfo != VMA_NULL)
                {
                    allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
                }

                return VK_SUCCESS;
            }
            allocator->FreeMemory(
                1, // allocationCount
                pAllocation);
            *pAllocation = VK_NULL_HANDLE;
            (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
            *pImage = VK_NULL_HANDLE;
            return res;
        }
        (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
        *pImage = VK_NULL_HANDLE;
        return res;
    }
    return res;
}
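
// Illustrative sketch of the analogous image path, not part of the library
// source; optimal tiling selects VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL above.
static void ExampleCreateSampledImage(VmaAllocator allocator)
{
    VkImageCreateInfo imgInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
    imgInfo.imageType = VK_IMAGE_TYPE_2D;
    imgInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
    imgInfo.extent = { 1024, 1024, 1 };
    imgInfo.mipLevels = 1;
    imgInfo.arrayLayers = 1;
    imgInfo.samples = VK_SAMPLE_COUNT_1_BIT;
    imgInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
    imgInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
    imgInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    imgInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkImage image = VK_NULL_HANDLE;
    VmaAllocation alloc = VK_NULL_HANDLE;
    if(vmaCreateImage(allocator, &imgInfo, &allocCreateInfo, &image, &alloc, nullptr) == VK_SUCCESS)
    {
        // ... record commands that use the image ...
        vmaDestroyImage(allocator, image, alloc);
    }
}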

void vmaDestroyImage(
    VmaAllocator allocator,
    VkImage image,
    VmaAllocation allocation)
{
    VMA_ASSERT(allocator);

    if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    {
        return;
    }

    VMA_DEBUG_LOG("vmaDestroyImage");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordDestroyImage(
            allocator->GetCurrentFrameIndex(),
            allocation);
    }
#endif

    if(image != VK_NULL_HANDLE)
    {
        (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
    }
    if(allocation != VK_NULL_HANDLE)
    {
        allocator->FreeMemory(
            1, // allocationCount
            &allocation);
    }
}

#endif // #ifdef VMA_IMPLEMENTATION
[The remainder of this patch is regenerated Doxygen tooltip markup from docs/html/vk__mem__alloc_8h_source.html. Each removed/added tooltip pair is textually identical except for its "Definition: vk_mem_alloc.h:NNNN" line number, which shifted to account for the documentation added for VMA_ALLOCATION_CREATE_DONT_BIND_BIT (e.g. 2848 -> 2856), plus one new tooltip entry (Definition: vk_mem_alloc.h:2114), presumably for the new flag itself.]