From 2283f4287ac25015a13e4eebd34c733c153bad5c Mon Sep 17 00:00:00 2001 From: Adam Sawicki Date: Mon, 12 Mar 2018 16:01:00 +0100 Subject: [PATCH] Minor fix in documentation. --- docs/html/configuration.html | 2 +- docs/html/vk__mem__alloc_8h_source.html | 218 ++++++++++++------------ src/vk_mem_alloc.h | 5 +- 3 files changed, 113 insertions(+), 112 deletions(-) diff --git a/docs/html/configuration.html b/docs/html/configuration.html index 08008f7..42ac113 100644 --- a/docs/html/configuration.html +++ b/docs/html/configuration.html @@ -66,7 +66,7 @@ $(function() {
Configuration
-

Please check "CONFIGURATION SECTION" in the code to find macros that you can define before each include of this file or change directly in this file to provide your own implementation of basic facilities like assert, min() and max() functions, mutex etc. C++ STL is used by default, but changing these allows you to get rid of any STL usage if you want, as many game developers tend to do.

+

Please check "CONFIGURATION SECTION" in the code to find macros that you can define before each include of this file or change directly in this file to provide your own implementation of basic facilities like assert, min() and max() functions, mutex, atomic etc. The library uses its own implementation of containers by default, but you can switch to using STL containers instead.
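For example, a minimal sketch of overriding one such macro before including the header (MyEngineAssert is a hypothetical engine function, not part of the library):

    #define VMA_ASSERT(expr) MyEngineAssert(expr) // hypothetical engine assert
    #define VMA_IMPLEMENTATION
    #include "vk_mem_alloc.h"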

Pointers to Vulkan functions

The library uses Vulkan functions straight from the vulkan.h header by default. If you want to provide your own pointers to these functions, e.g. fetched using vkGetInstanceProcAddr() and vkGetDeviceProcAddr():
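For example (a minimal sketch, assuming instance, physicalDevice and device are your valid Vulkan handles; the member list matches the VmaVulkanFunctions struct):

    VmaVulkanFunctions vulkanFunctions = {};
    vulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)
        vkGetInstanceProcAddr(instance, "vkGetPhysicalDeviceProperties");
    vulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)
        vkGetDeviceProcAddr(device, "vkAllocateMemory");
    // ...fill the remaining members of VmaVulkanFunctions the same way...

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;
    allocatorInfo.pVulkanFunctions = &vulkanFunctions;

    VmaAllocator allocator;
    vmaCreateAllocator(&allocatorInfo, &allocator);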

diff --git a/docs/html/vk__mem__alloc_8h_source.html b/docs/html/vk__mem__alloc_8h_source.html index 1bb54eb..5167773 100644 --- a/docs/html/vk__mem__alloc_8h_source.html +++ b/docs/html/vk__mem__alloc_8h_source.html @@ -62,157 +62,157 @@ $(function() {
vk_mem_alloc.h
-Go to the documentation of this file.
1 //
2 // Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29 
934 #include <vulkan/vulkan.h>
935 
936 VK_DEFINE_HANDLE(VmaAllocator)
937 
938 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
940  VmaAllocator allocator,
941  uint32_t memoryType,
942  VkDeviceMemory memory,
943  VkDeviceSize size);
945 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
946  VmaAllocator allocator,
947  uint32_t memoryType,
948  VkDeviceMemory memory,
949  VkDeviceSize size);
950 
958 typedef struct VmaDeviceMemoryCallbacks {
964 
994 
997 typedef VkFlags VmaAllocatorCreateFlags;
998 
1003 typedef struct VmaVulkanFunctions {
1004  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
1005  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
1006  PFN_vkAllocateMemory vkAllocateMemory;
1007  PFN_vkFreeMemory vkFreeMemory;
1008  PFN_vkMapMemory vkMapMemory;
1009  PFN_vkUnmapMemory vkUnmapMemory;
1010  PFN_vkBindBufferMemory vkBindBufferMemory;
1011  PFN_vkBindImageMemory vkBindImageMemory;
1012  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
1013  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
1014  PFN_vkCreateBuffer vkCreateBuffer;
1015  PFN_vkDestroyBuffer vkDestroyBuffer;
1016  PFN_vkCreateImage vkCreateImage;
1017  PFN_vkDestroyImage vkDestroyImage;
1018  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
1019  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
1021 
1024 {
1026  VmaAllocatorCreateFlags flags;
1028 
1029  VkPhysicalDevice physicalDevice;
1031 
1032  VkDevice device;
1034 
1037 
1038  const VkAllocationCallbacks* pAllocationCallbacks;
1040 
1079  const VkDeviceSize* pHeapSizeLimit;
1093 
1095 VkResult vmaCreateAllocator(
1096  const VmaAllocatorCreateInfo* pCreateInfo,
1097  VmaAllocator* pAllocator);
1098 
1100 void vmaDestroyAllocator(
1101  VmaAllocator allocator);
1102 
1108  VmaAllocator allocator,
1109  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
1110 
1116  VmaAllocator allocator,
1117  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
1118 
1126  VmaAllocator allocator,
1127  uint32_t memoryTypeIndex,
1128  VkMemoryPropertyFlags* pFlags);
1129 
1139  VmaAllocator allocator,
1140  uint32_t frameIndex);
1141 
1144 typedef struct VmaStatInfo
1145 {
1147  uint32_t blockCount;
1153  VkDeviceSize usedBytes;
1155  VkDeviceSize unusedBytes;
1156  VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
1157  VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
1158 } VmaStatInfo;
1159 
1161 typedef struct VmaStats
1162 {
1163  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
1164  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
1166 } VmaStats;
1167 
1169 void vmaCalculateStats(
1170  VmaAllocator allocator,
1171  VmaStats* pStats);
1172 
1173 #define VMA_STATS_STRING_ENABLED 1
1174 
1175 #if VMA_STATS_STRING_ENABLED
1176 
1178 
1180 void vmaBuildStatsString(
1181  VmaAllocator allocator,
1182  char** ppStatsString,
1183  VkBool32 detailedMap);
1184 
1185 void vmaFreeStatsString(
1186  VmaAllocator allocator,
1187  char* pStatsString);
1188 
1189 #endif // #if VMA_STATS_STRING_ENABLED
1190 
1191 VK_DEFINE_HANDLE(VmaPool)
1192 
1193 typedef enum VmaMemoryUsage
1194 {
1243 } VmaMemoryUsage;
1244 
1259 
1309 
1313 
1315 {
1317  VmaAllocationCreateFlags flags;
1328  VkMemoryPropertyFlags requiredFlags;
1333  VkMemoryPropertyFlags preferredFlags;
1341  uint32_t memoryTypeBits;
1347  VmaPool pool;
1354  void* pUserData;
1356 
1373 VkResult vmaFindMemoryTypeIndex(
1374  VmaAllocator allocator,
1375  uint32_t memoryTypeBits,
1376  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1377  uint32_t* pMemoryTypeIndex);
1378 
1392  VmaAllocator allocator,
1393  const VkBufferCreateInfo* pBufferCreateInfo,
1394  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1395  uint32_t* pMemoryTypeIndex);
1396 
1410  VmaAllocator allocator,
1411  const VkImageCreateInfo* pImageCreateInfo,
1412  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1413  uint32_t* pMemoryTypeIndex);
1414 
1435 
1438 typedef VkFlags VmaPoolCreateFlags;
1439 
1442 typedef struct VmaPoolCreateInfo {
1448  VmaPoolCreateFlags flags;
1453  VkDeviceSize blockSize;
1482 
1485 typedef struct VmaPoolStats {
1488  VkDeviceSize size;
1491  VkDeviceSize unusedSize;
1504  VkDeviceSize unusedRangeSizeMax;
1505 } VmaPoolStats;
1506 
1513 VkResult vmaCreatePool(
1514  VmaAllocator allocator,
1515  const VmaPoolCreateInfo* pCreateInfo,
1516  VmaPool* pPool);
1517 
1520 void vmaDestroyPool(
1521  VmaAllocator allocator,
1522  VmaPool pool);
1523 
1530 void vmaGetPoolStats(
1531  VmaAllocator allocator,
1532  VmaPool pool,
1533  VmaPoolStats* pPoolStats);
1534 
1542  VmaAllocator allocator,
1543  VmaPool pool,
1544  size_t* pLostAllocationCount);
1545 
1546 VK_DEFINE_HANDLE(VmaAllocation)
1547 
1548 
1550 typedef struct VmaAllocationInfo {
1555  uint32_t memoryType;
1564  VkDeviceMemory deviceMemory;
1569  VkDeviceSize offset;
1574  VkDeviceSize size;
1588  void* pUserData;
1590 
1601 VkResult vmaAllocateMemory(
1602  VmaAllocator allocator,
1603  const VkMemoryRequirements* pVkMemoryRequirements,
1604  const VmaAllocationCreateInfo* pCreateInfo,
1605  VmaAllocation* pAllocation,
1606  VmaAllocationInfo* pAllocationInfo);
1607 
1615  VmaAllocator allocator,
1616  VkBuffer buffer,
1617  const VmaAllocationCreateInfo* pCreateInfo,
1618  VmaAllocation* pAllocation,
1619  VmaAllocationInfo* pAllocationInfo);
1620 
1622 VkResult vmaAllocateMemoryForImage(
1623  VmaAllocator allocator,
1624  VkImage image,
1625  const VmaAllocationCreateInfo* pCreateInfo,
1626  VmaAllocation* pAllocation,
1627  VmaAllocationInfo* pAllocationInfo);
1628 
1630 void vmaFreeMemory(
1631  VmaAllocator allocator,
1632  VmaAllocation allocation);
1633 
1651  VmaAllocator allocator,
1652  VmaAllocation allocation,
1653  VmaAllocationInfo* pAllocationInfo);
1654 
1669 VkBool32 vmaTouchAllocation(
1670  VmaAllocator allocator,
1671  VmaAllocation allocation);
1672 
1687  VmaAllocator allocator,
1688  VmaAllocation allocation,
1689  void* pUserData);
1690 
1702  VmaAllocator allocator,
1703  VmaAllocation* pAllocation);
1704 
1739 VkResult vmaMapMemory(
1740  VmaAllocator allocator,
1741  VmaAllocation allocation,
1742  void** ppData);
1743 
1748 void vmaUnmapMemory(
1749  VmaAllocator allocator,
1750  VmaAllocation allocation);
1751 
1753 typedef struct VmaDefragmentationInfo {
1758  VkDeviceSize maxBytesToMove;
1765 
1767 typedef struct VmaDefragmentationStats {
1769  VkDeviceSize bytesMoved;
1771  VkDeviceSize bytesFreed;
1777 
1860 VkResult vmaDefragment(
1861  VmaAllocator allocator,
1862  VmaAllocation* pAllocations,
1863  size_t allocationCount,
1864  VkBool32* pAllocationsChanged,
1865  const VmaDefragmentationInfo *pDefragmentationInfo,
1866  VmaDefragmentationStats* pDefragmentationStats);
1867 
1894 VkResult vmaCreateBuffer(
1895  VmaAllocator allocator,
1896  const VkBufferCreateInfo* pBufferCreateInfo,
1897  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1898  VkBuffer* pBuffer,
1899  VmaAllocation* pAllocation,
1900  VmaAllocationInfo* pAllocationInfo);
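// A minimal usage sketch of vmaCreateBuffer (illustrative only, in application
// code; assumes `allocator` is a valid VmaAllocator, and that the `usage` member
// and VMA_MEMORY_USAGE_GPU_ONLY come from portions of this header elided above):
VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufferInfo.size = 65536;
bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
VkBuffer buffer;
VmaAllocation allocation;
vmaCreateBuffer(allocator, &bufferInfo, &allocCreateInfo, &buffer, &allocation, NULL);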
1901 
1913 void vmaDestroyBuffer(
1914  VmaAllocator allocator,
1915  VkBuffer buffer,
1916  VmaAllocation allocation);
1917 
1919 VkResult vmaCreateImage(
1920  VmaAllocator allocator,
1921  const VkImageCreateInfo* pImageCreateInfo,
1922  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1923  VkImage* pImage,
1924  VmaAllocation* pAllocation,
1925  VmaAllocationInfo* pAllocationInfo);
1926 
1938 void vmaDestroyImage(
1939  VmaAllocator allocator,
1940  VkImage image,
1941  VmaAllocation allocation);
1942 
1943 #ifdef __cplusplus
1944 }
1945 #endif
1946 
1947 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
1948 
1949 // For Visual Studio IntelliSense.
1950 #ifdef __INTELLISENSE__
1951 #define VMA_IMPLEMENTATION
1952 #endif
1953 
1954 #ifdef VMA_IMPLEMENTATION
1955 #undef VMA_IMPLEMENTATION
1956 
1957 #include <cstdint>
1958 #include <cstdlib>
1959 #include <cstring>
1960 
1961 /*******************************************************************************
1962 CONFIGURATION SECTION
1963 
1964 Define some of these macros before each #include of this header or change them
1965 here if you need other than the default behavior, depending on your environment.
1966 */
1967 
1968 /*
1969 Define this macro to 1 to make the library fetch pointers to Vulkan functions
1970 internally, like:
1971 
1972  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
1973 
1974 Define to 0 if you are going to provide your own pointers to Vulkan functions via
1975 VmaAllocatorCreateInfo::pVulkanFunctions.
1976 */
1977 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
1978 #define VMA_STATIC_VULKAN_FUNCTIONS 1
1979 #endif
1980 
1981 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
1982 //#define VMA_USE_STL_CONTAINERS 1
1983 
1984 /* Set this macro to 1 to make the library include and use STL containers:
1985 std::pair, std::vector, std::list, std::unordered_map.
1986 
1987 Set it to 0 or undefined to make the library use its own implementation of
1988 the containers.
1989 */
1990 #if VMA_USE_STL_CONTAINERS
1991  #define VMA_USE_STL_VECTOR 1
1992  #define VMA_USE_STL_UNORDERED_MAP 1
1993  #define VMA_USE_STL_LIST 1
1994 #endif
1995 
1996 #if VMA_USE_STL_VECTOR
1997  #include <vector>
1998 #endif
1999 
2000 #if VMA_USE_STL_UNORDERED_MAP
2001  #include <unordered_map>
2002 #endif
2003 
2004 #if VMA_USE_STL_LIST
2005  #include <list>
2006 #endif
2007 
2008 /*
2009 Following headers are used in this CONFIGURATION section only, so feel free to
2010 remove them if not needed.
2011 */
2012 #include <cassert> // for assert
2013 #include <algorithm> // for min, max
2014 #include <mutex> // for std::mutex
2015 #include <atomic> // for std::atomic
2016 
2017 #if !defined(_WIN32) && !defined(__APPLE__)
2018  #include <malloc.h> // for aligned_alloc()
2019 #endif
2020 
2021 #ifndef VMA_NULL
2022  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
2023  #define VMA_NULL nullptr
2024 #endif
2025 
2026 #if defined(__APPLE__) || defined(__ANDROID__)
2027 #include <cstdlib>
2028 void *aligned_alloc(size_t alignment, size_t size)
2029 {
2030  // alignment must be >= sizeof(void*)
2031  if(alignment < sizeof(void*))
2032  {
2033  alignment = sizeof(void*);
2034  }
2035 
2036  void *pointer;
2037  if(posix_memalign(&pointer, alignment, size) == 0)
2038  return pointer;
2039  return VMA_NULL;
2040 }
2041 #endif
2042 
2043 // Normal assert to check for programmer's errors, especially in Debug configuration.
2044 #ifndef VMA_ASSERT
2045  #ifdef _DEBUG
2046  #define VMA_ASSERT(expr) assert(expr)
2047  #else
2048  #define VMA_ASSERT(expr)
2049  #endif
2050 #endif
2051 
2052 // Assert that will be called very often, like inside data structures e.g. operator[].
2053 // Making it non-empty can make the program slow.
2054 #ifndef VMA_HEAVY_ASSERT
2055  #ifdef _DEBUG
2056  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
2057  #else
2058  #define VMA_HEAVY_ASSERT(expr)
2059  #endif
2060 #endif
2061 
2062 #ifndef VMA_ALIGN_OF
2063  #define VMA_ALIGN_OF(type) (__alignof(type))
2064 #endif
2065 
2066 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
2067  #if defined(_WIN32)
2068  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
2069  #else
2070  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
2071  #endif
2072 #endif
2073 
2074 #ifndef VMA_SYSTEM_FREE
2075  #if defined(_WIN32)
2076  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
2077  #else
2078  #define VMA_SYSTEM_FREE(ptr) free(ptr)
2079  #endif
2080 #endif
2081 
2082 #ifndef VMA_MIN
2083  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
2084 #endif
2085 
2086 #ifndef VMA_MAX
2087  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
2088 #endif
2089 
2090 #ifndef VMA_SWAP
2091  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
2092 #endif
2093 
2094 #ifndef VMA_SORT
2095  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
2096 #endif
2097 
2098 #ifndef VMA_DEBUG_LOG
2099  #define VMA_DEBUG_LOG(format, ...)
2100  /*
2101  #define VMA_DEBUG_LOG(format, ...) do { \
2102  printf(format, __VA_ARGS__); \
2103  printf("\n"); \
2104  } while(false)
2105  */
2106 #endif
2107 
2108 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
2109 #if VMA_STATS_STRING_ENABLED
2110  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
2111  {
2112  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
2113  }
2114  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
2115  {
2116  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
2117  }
2118  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
2119  {
2120  snprintf(outStr, strLen, "%p", ptr);
2121  }
2122 #endif
2123 
2124 #ifndef VMA_MUTEX
2125  class VmaMutex
2126  {
2127  public:
2128  VmaMutex() { }
2129  ~VmaMutex() { }
2130  void Lock() { m_Mutex.lock(); }
2131  void Unlock() { m_Mutex.unlock(); }
2132  private:
2133  std::mutex m_Mutex;
2134  };
2135  #define VMA_MUTEX VmaMutex
2136 #endif
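// For illustration only: before including this header, a user could point
// VMA_MUTEX at any class exposing Lock()/Unlock(), e.g. this simple spinlock
// (an assumption of this sketch, not part of the library):
class MyVmaSpinLock
{
public:
    void Lock() { while(m_Flag.test_and_set(std::memory_order_acquire)) { } }
    void Unlock() { m_Flag.clear(std::memory_order_release); }
private:
    std::atomic_flag m_Flag = ATOMIC_FLAG_INIT;
};
#define VMA_MUTEX MyVmaSpinLock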
2137 
2138 /*
2139 If providing your own implementation, you need to implement a subset of std::atomic:
2140 
2141 - Constructor(uint32_t desired)
2142 - uint32_t load() const
2143 - void store(uint32_t desired)
2144 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
2145 */
2146 #ifndef VMA_ATOMIC_UINT32
2147  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
2148 #endif
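// For illustration only: a hypothetical stand-in a user could define before
// including this header. It implements the subset above WITHOUT real atomicity,
// so it is only valid if the allocator is used from a single thread:
struct MyVmaNonAtomicUint32
{
    MyVmaNonAtomicUint32(uint32_t desired) : m_Value(desired) { }
    uint32_t load() const { return m_Value; }
    void store(uint32_t desired) { m_Value = desired; }
    bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
    {
        if(m_Value == expected) { m_Value = desired; return true; }
        expected = m_Value;
        return false;
    }
private:
    uint32_t m_Value;
};
#define VMA_ATOMIC_UINT32 MyVmaNonAtomicUint32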
2149 
2150 #ifndef VMA_BEST_FIT
2151 
2163  #define VMA_BEST_FIT (1)
2164 #endif
2165 
2166 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
2167 
2171  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
2172 #endif
2173 
2174 #ifndef VMA_DEBUG_ALIGNMENT
2175 
2179  #define VMA_DEBUG_ALIGNMENT (1)
2180 #endif
2181 
2182 #ifndef VMA_DEBUG_MARGIN
2183 
2187  #define VMA_DEBUG_MARGIN (0)
2188 #endif
2189 
2190 #ifndef VMA_DEBUG_GLOBAL_MUTEX
2191 
2195  #define VMA_DEBUG_GLOBAL_MUTEX (0)
2196 #endif
2197 
2198 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
2199 
2203  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
2204 #endif
2205 
2206 #ifndef VMA_SMALL_HEAP_MAX_SIZE
2207  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
2209 #endif
2210 
2211 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
2212  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
2214 #endif
2215 
2216 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
2217 
2218 /*******************************************************************************
2219 END OF CONFIGURATION
2220 */
2221 
2222 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
2223  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
2224 
2225 // Returns number of bits set to 1 in (v).
2226 static inline uint32_t VmaCountBitsSet(uint32_t v)
2227 {
2228  uint32_t c = v - ((v >> 1) & 0x55555555);
2229  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
2230  c = ((c >> 4) + c) & 0x0F0F0F0F;
2231  c = ((c >> 8) + c) & 0x00FF00FF;
2232  c = ((c >> 16) + c) & 0x0000FFFF;
2233  return c;
2234 }
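// Illustrative check (e.g. in a test function): 0xB is binary 1011, three bits set.
assert(VmaCountBitsSet(0xB) == 3);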
2235 
2236 // Aligns given value up to the nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
2237 // Use types like uint32_t, uint64_t as T.
2238 template <typename T>
2239 static inline T VmaAlignUp(T val, T align)
2240 {
2241  return (val + align - 1) / align * align;
2242 }
2243 
2244 // Division with mathematical rounding to the nearest integer.
2245 template <typename T>
2246 inline T VmaRoundDiv(T x, T y)
2247 {
2248  return (x + (y / (T)2)) / y;
2249 }
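// Illustrative checks (e.g. in a test function): 10/4 = 2.5 rounds up, 9/4 = 2.25 rounds down.
assert(VmaRoundDiv<uint32_t>(10, 4) == 3);
assert(VmaRoundDiv<uint32_t>(9, 4) == 2);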
2250 
2251 #ifndef VMA_SORT
2252 
2253 template<typename Iterator, typename Compare>
2254 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
2255 {
2256  Iterator centerValue = end; --centerValue;
2257  Iterator insertIndex = beg;
2258  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
2259  {
2260  if(cmp(*memTypeIndex, *centerValue))
2261  {
2262  if(insertIndex != memTypeIndex)
2263  {
2264  VMA_SWAP(*memTypeIndex, *insertIndex);
2265  }
2266  ++insertIndex;
2267  }
2268  }
2269  if(insertIndex != centerValue)
2270  {
2271  VMA_SWAP(*insertIndex, *centerValue);
2272  }
2273  return insertIndex;
2274 }
2275 
2276 template<typename Iterator, typename Compare>
2277 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
2278 {
2279  if(beg < end)
2280  {
2281  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
2282  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
2283  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
2284  }
2285 }
2286 
2287 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
2288 
2289 #endif // #ifndef VMA_SORT
2290 
2291 /*
2292 Returns true if two memory blocks occupy overlapping pages.
2293 ResourceA must be at a lower memory offset than ResourceB.
2294 
2295 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
2296 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
2297 */
2298 static inline bool VmaBlocksOnSamePage(
2299  VkDeviceSize resourceAOffset,
2300  VkDeviceSize resourceASize,
2301  VkDeviceSize resourceBOffset,
2302  VkDeviceSize pageSize)
2303 {
2304  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
2305  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
2306  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
2307  VkDeviceSize resourceBStart = resourceBOffset;
2308  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
2309  return resourceAEndPage == resourceBStartPage;
2310 }
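// Worked example (e.g. in a test function), assuming pageSize = 1024, a power of two:
// resource A occupies bytes [0, 1000], so its last page is page 0 ([0, 1023]).
assert(VmaBlocksOnSamePage(0, 1001, 1010, 1024));  // B starts inside page 0 -> overlap
assert(!VmaBlocksOnSamePage(0, 1001, 1024, 1024)); // B starts on page 1 -> no overlap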
2311 
2312 enum VmaSuballocationType
2313 {
2314  VMA_SUBALLOCATION_TYPE_FREE = 0,
2315  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
2316  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
2317  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
2318  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
2319  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
2320  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
2321 };
2322 
2323 /*
2324 Returns true if the given suballocation types could conflict and must respect
2325 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is a buffer
2326 or linear image and the other is an optimal image. If a type is unknown, the
2327 function behaves conservatively.
2328 */
2329 static inline bool VmaIsBufferImageGranularityConflict(
2330  VmaSuballocationType suballocType1,
2331  VmaSuballocationType suballocType2)
2332 {
2333  if(suballocType1 > suballocType2)
2334  {
2335  VMA_SWAP(suballocType1, suballocType2);
2336  }
2337 
2338  switch(suballocType1)
2339  {
2340  case VMA_SUBALLOCATION_TYPE_FREE:
2341  return false;
2342  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
2343  return true;
2344  case VMA_SUBALLOCATION_TYPE_BUFFER:
2345  return
2346  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
2347  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
2348  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
2349  return
2350  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
2351  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
2352  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
2353  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
2354  return
2355  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
2356  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
2357  return false;
2358  default:
2359  VMA_ASSERT(0);
2360  return true;
2361  }
2362 }
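// Illustrative checks (e.g. in a test function):
assert(VmaIsBufferImageGranularityConflict(
    VMA_SUBALLOCATION_TYPE_BUFFER, VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)); // buffer vs optimal image
assert(!VmaIsBufferImageGranularityConflict(
    VMA_SUBALLOCATION_TYPE_BUFFER, VMA_SUBALLOCATION_TYPE_BUFFER)); // two buffers never conflict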
2363 
2364 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
2365 struct VmaMutexLock
2366 {
2367 public:
2368  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
2369  m_pMutex(useMutex ? &mutex : VMA_NULL)
2370  {
2371  if(m_pMutex)
2372  {
2373  m_pMutex->Lock();
2374  }
2375  }
2376 
2377  ~VmaMutexLock()
2378  {
2379  if(m_pMutex)
2380  {
2381  m_pMutex->Unlock();
2382  }
2383  }
2384 
2385 private:
2386  VMA_MUTEX* m_pMutex;
2387 };
2388 
2389 #if VMA_DEBUG_GLOBAL_MUTEX
2390  static VMA_MUTEX gDebugGlobalMutex;
2391  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
2392 #else
2393  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
2394 #endif
2395 
2396 // Minimum size of a free suballocation to register it in the free suballocation collection.
2397 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
2398 
2399 /*
2400 Performs binary search and returns an iterator to the first element that is
2401 greater than or equal to (key), according to comparison (cmp).
2402 
2403 Cmp should return true if its first argument is less than its second argument.
2404 
2405 The returned iterator points to the found element if it is present in the
2406 collection, or to the place where a new element with value (key) should be inserted.
2407 */
2408 template <typename IterT, typename KeyT, typename CmpT>
2409 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpT cmp)
2410 {
2411  size_t down = 0, up = (end - beg);
2412  while(down < up)
2413  {
2414  const size_t mid = (down + up) / 2;
2415  if(cmp(*(beg+mid), key))
2416  {
2417  down = mid + 1;
2418  }
2419  else
2420  {
2421  up = mid;
2422  }
2423  }
2424  return beg + down;
2425 }
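// A minimal usage sketch (e.g. in a test function): lower-bound search on a sorted array.
const uint32_t sorted[] = { 1, 3, 3, 7 };
const uint32_t* it = VmaBinaryFindFirstNotLess(
    sorted, sorted + 4, 3u,
    [](uint32_t lhs, uint32_t rhs) { return lhs < rhs; });
assert(it - sorted == 1); // first element not less than 3 is at index 1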
2426 
2428 // Memory allocation
2429 
2430 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
2431 {
2432  if((pAllocationCallbacks != VMA_NULL) &&
2433  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
2434  {
2435  return (*pAllocationCallbacks->pfnAllocation)(
2436  pAllocationCallbacks->pUserData,
2437  size,
2438  alignment,
2439  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2440  }
2441  else
2442  {
2443  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
2444  }
2445 }
2446 
2447 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
2448 {
2449  if((pAllocationCallbacks != VMA_NULL) &&
2450  (pAllocationCallbacks->pfnFree != VMA_NULL))
2451  {
2452  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
2453  }
2454  else
2455  {
2456  VMA_SYSTEM_FREE(ptr);
2457  }
2458 }
2459 
2460 template<typename T>
2461 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
2462 {
2463  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
2464 }
2465 
2466 template<typename T>
2467 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
2468 {
2469  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
2470 }
2471 
2472 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
2473 
2474 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
2475 
2476 template<typename T>
2477 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
2478 {
2479  ptr->~T();
2480  VmaFree(pAllocationCallbacks, ptr);
2481 }
2482 
2483 template<typename T>
2484 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
2485 {
2486  if(ptr != VMA_NULL)
2487  {
2488  for(size_t i = count; i--; )
2489  {
2490  ptr[i].~T();
2491  }
2492  VmaFree(pAllocationCallbacks, ptr);
2493  }
2494 }
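// A minimal usage sketch of the helpers above (e.g. in a test function);
// passing VMA_NULL falls back to VMA_SYSTEM_ALIGNED_MALLOC / VMA_SYSTEM_FREE:
const VkAllocationCallbacks* pCallbacks = VMA_NULL;
VmaStatInfo* pInfo = vma_new(pCallbacks, VmaStatInfo); // VmaMalloc + placement-new
vma_delete(pCallbacks, pInfo); // calls ~T(), then VmaFree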
2495 
2496 // STL-compatible allocator.
2497 template<typename T>
2498 class VmaStlAllocator
2499 {
2500 public:
2501  const VkAllocationCallbacks* const m_pCallbacks;
2502  typedef T value_type;
2503 
2504  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
2505  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
2506 
2507  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
2508  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
2509 
2510  template<typename U>
2511  bool operator==(const VmaStlAllocator<U>& rhs) const
2512  {
2513  return m_pCallbacks == rhs.m_pCallbacks;
2514  }
2515  template<typename U>
2516  bool operator!=(const VmaStlAllocator<U>& rhs) const
2517  {
2518  return m_pCallbacks != rhs.m_pCallbacks;
2519  }
2520 
2521  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
2522 };
2523 
2524 #if VMA_USE_STL_VECTOR
2525 
2526 #define VmaVector std::vector
2527 
2528 template<typename T, typename allocatorT>
2529 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
2530 {
2531  vec.insert(vec.begin() + index, item);
2532 }
2533 
2534 template<typename T, typename allocatorT>
2535 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
2536 {
2537  vec.erase(vec.begin() + index);
2538 }
2539 
2540 #else // #if VMA_USE_STL_VECTOR
2541 
2542 /* Class with an interface compatible with a subset of std::vector.
2543 T must be POD because constructors and destructors are not called and memcpy is
2544 used for these objects. */
2545 template<typename T, typename AllocatorT>
2546 class VmaVector
2547 {
2548 public:
2549  typedef T value_type;
2550 
2551  VmaVector(const AllocatorT& allocator) :
2552  m_Allocator(allocator),
2553  m_pArray(VMA_NULL),
2554  m_Count(0),
2555  m_Capacity(0)
2556  {
2557  }
2558 
2559  VmaVector(size_t count, const AllocatorT& allocator) :
2560  m_Allocator(allocator),
2561  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
2562  m_Count(count),
2563  m_Capacity(count)
2564  {
2565  }
2566 
2567  VmaVector(const VmaVector<T, AllocatorT>& src) :
2568  m_Allocator(src.m_Allocator),
2569  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
2570  m_Count(src.m_Count),
2571  m_Capacity(src.m_Count)
2572  {
2573  if(m_Count != 0)
2574  {
2575  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
2576  }
2577  }
2578 
2579  ~VmaVector()
2580  {
2581  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
2582  }
2583 
2584  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
2585  {
2586  if(&rhs != this)
2587  {
2588  resize(rhs.m_Count);
2589  if(m_Count != 0)
2590  {
2591  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
2592  }
2593  }
2594  return *this;
2595  }
2596 
2597  bool empty() const { return m_Count == 0; }
2598  size_t size() const { return m_Count; }
2599  T* data() { return m_pArray; }
2600  const T* data() const { return m_pArray; }
2601 
2602  T& operator[](size_t index)
2603  {
2604  VMA_HEAVY_ASSERT(index < m_Count);
2605  return m_pArray[index];
2606  }
2607  const T& operator[](size_t index) const
2608  {
2609  VMA_HEAVY_ASSERT(index < m_Count);
2610  return m_pArray[index];
2611  }
2612 
2613  T& front()
2614  {
2615  VMA_HEAVY_ASSERT(m_Count > 0);
2616  return m_pArray[0];
2617  }
2618  const T& front() const
2619  {
2620  VMA_HEAVY_ASSERT(m_Count > 0);
2621  return m_pArray[0];
2622  }
2623  T& back()
2624  {
2625  VMA_HEAVY_ASSERT(m_Count > 0);
2626  return m_pArray[m_Count - 1];
2627  }
2628  const T& back() const
2629  {
2630  VMA_HEAVY_ASSERT(m_Count > 0);
2631  return m_pArray[m_Count - 1];
2632  }
2633 
2634  void reserve(size_t newCapacity, bool freeMemory = false)
2635  {
2636  newCapacity = VMA_MAX(newCapacity, m_Count);
2637 
2638  if((newCapacity < m_Capacity) && !freeMemory)
2639  {
2640  newCapacity = m_Capacity;
2641  }
2642 
2643  if(newCapacity != m_Capacity)
2644  {
2645  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
2646  if(m_Count != 0)
2647  {
2648  memcpy(newArray, m_pArray, m_Count * sizeof(T));
2649  }
2650  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
2651  m_Capacity = newCapacity;
2652  m_pArray = newArray;
2653  }
2654  }
2655 
2656  void resize(size_t newCount, bool freeMemory = false)
2657  {
2658  size_t newCapacity = m_Capacity;
2659  if(newCount > m_Capacity)
2660  {
2661  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
2662  }
2663  else if(freeMemory)
2664  {
2665  newCapacity = newCount;
2666  }
2667 
2668  if(newCapacity != m_Capacity)
2669  {
2670  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
2671  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
2672  if(elementsToCopy != 0)
2673  {
2674  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
2675  }
2676  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
2677  m_Capacity = newCapacity;
2678  m_pArray = newArray;
2679  }
2680 
2681  m_Count = newCount;
2682  }
2683 
2684  void clear(bool freeMemory = false)
2685  {
2686  resize(0, freeMemory);
2687  }
2688 
2689  void insert(size_t index, const T& src)
2690  {
2691  VMA_HEAVY_ASSERT(index <= m_Count);
2692  const size_t oldCount = size();
2693  resize(oldCount + 1);
2694  if(index < oldCount)
2695  {
2696  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
2697  }
2698  m_pArray[index] = src;
2699  }
2700 
2701  void remove(size_t index)
2702  {
2703  VMA_HEAVY_ASSERT(index < m_Count);
2704  const size_t oldCount = size();
2705  if(index < oldCount - 1)
2706  {
2707  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
2708  }
2709  resize(oldCount - 1);
2710  }
2711 
2712  void push_back(const T& src)
2713  {
2714  const size_t newIndex = size();
2715  resize(newIndex + 1);
2716  m_pArray[newIndex] = src;
2717  }
2718 
2719  void pop_back()
2720  {
2721  VMA_HEAVY_ASSERT(m_Count > 0);
2722  resize(size() - 1);
2723  }
2724 
2725  void push_front(const T& src)
2726  {
2727  insert(0, src);
2728  }
2729 
2730  void pop_front()
2731  {
2732  VMA_HEAVY_ASSERT(m_Count > 0);
2733  remove(0);
2734  }
2735 
2736  typedef T* iterator;
2737 
2738  iterator begin() { return m_pArray; }
2739  iterator end() { return m_pArray + m_Count; }
2740 
2741 private:
2742  AllocatorT m_Allocator;
2743  T* m_pArray;
2744  size_t m_Count;
2745  size_t m_Capacity;
2746 };
2747 
2748 template<typename T, typename allocatorT>
2749 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
2750 {
2751  vec.insert(index, item);
2752 }
2753 
2754 template<typename T, typename allocatorT>
2755 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
2756 {
2757  vec.remove(index);
2758 }
2759 
2760 #endif // #if VMA_USE_STL_VECTOR
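// A minimal usage sketch (e.g. in a test function) that works with either variant,
// std::vector or the custom VmaVector, via VmaStlAllocator defined earlier:
VmaStlAllocator<uint32_t> alloc(VMA_NULL);
VmaVector< uint32_t, VmaStlAllocator<uint32_t> > vec(alloc);
vec.push_back(7);
VmaVectorInsert(vec, 0, 3u); // vec == {3, 7}
VmaVectorRemove(vec, 1);     // vec == {3}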
2761 
2762 template<typename CmpLess, typename VectorT>
2763 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
2764 {
2765  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
2766  vector.data(),
2767  vector.data() + vector.size(),
2768  value,
2769  CmpLess()) - vector.data();
2770  VmaVectorInsert(vector, indexToInsert, value);
2771  return indexToInsert;
2772 }
2773 
2774 template<typename CmpLess, typename VectorT>
2775 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
2776 {
2777  CmpLess comparator;
2778  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
2779  vector.begin(),
2780  vector.end(),
2781  value,
2782  comparator);
2783  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
2784  {
2785  size_t indexToRemove = it - vector.begin();
2786  VmaVectorRemove(vector, indexToRemove);
2787  return true;
2788  }
2789  return false;
2790 }
2791 
2792 template<typename CmpLess, typename VectorT>
2793 size_t VmaVectorFindSorted(const VectorT& vector, const typename VectorT::value_type& value)
2794 {
2795  CmpLess comparator;
2796  const typename VectorT::value_type* it = VmaBinaryFindFirstNotLess(
2797  vector.data(),
2798  vector.data() + vector.size(),
2799  value,
2800  comparator);
2801  if((it != vector.data() + vector.size()) && !comparator(*it, value) && !comparator(value, *it))
2802  {
2803  return it - vector.data();
2804  }
2805  else
2806  {
2807  return vector.size();
2808  }
2809 }
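// A sketch of keeping a vector sorted with these helpers (e.g. in a test
// function; CmpU32Less is an illustrative comparator, not part of the library):
struct CmpU32Less { bool operator()(uint32_t lhs, uint32_t rhs) const { return lhs < rhs; } };
VmaStlAllocator<uint32_t> alloc(VMA_NULL);
VmaVector< uint32_t, VmaStlAllocator<uint32_t> > vec(alloc);
VmaVectorInsertSorted<CmpU32Less>(vec, 5u);
VmaVectorInsertSorted<CmpU32Less>(vec, 2u); // vec == {2, 5}
assert(VmaVectorFindSorted<CmpU32Less>(vec, 5u) == 1);
VmaVectorRemoveSorted<CmpU32Less>(vec, 2u); // vec == {5}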
2810 
2812 // class VmaPoolAllocator
2813 
2814 /*
2815 Allocator for objects of type T using a list of arrays (pools) to speed up
2816 allocation. The number of elements that can be allocated is not bounded, because
2817 the allocator can create multiple blocks.
2818 */
2819 template<typename T>
2820 class VmaPoolAllocator
2821 {
2822 public:
2823  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
2824  ~VmaPoolAllocator();
2825  void Clear();
2826  T* Alloc();
2827  void Free(T* ptr);
2828 
2829 private:
2830  union Item
2831  {
2832  uint32_t NextFreeIndex;
2833  T Value;
2834  };
2835 
2836  struct ItemBlock
2837  {
2838  Item* pItems;
2839  uint32_t FirstFreeIndex;
2840  };
2841 
2842  const VkAllocationCallbacks* m_pAllocationCallbacks;
2843  size_t m_ItemsPerBlock;
2844  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
2845 
2846  ItemBlock& CreateNewBlock();
2847 };
2848 
2849 template<typename T>
2850 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
2851  m_pAllocationCallbacks(pAllocationCallbacks),
2852  m_ItemsPerBlock(itemsPerBlock),
2853  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
2854 {
2855  VMA_ASSERT(itemsPerBlock > 0);
2856 }
2857 
2858 template<typename T>
2859 VmaPoolAllocator<T>::~VmaPoolAllocator()
2860 {
2861  Clear();
2862 }
2863 
2864 template<typename T>
2865 void VmaPoolAllocator<T>::Clear()
2866 {
2867  for(size_t i = m_ItemBlocks.size(); i--; )
2868  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
2869  m_ItemBlocks.clear();
2870 }
2871 
2872 template<typename T>
2873 T* VmaPoolAllocator<T>::Alloc()
2874 {
2875  for(size_t i = m_ItemBlocks.size(); i--; )
2876  {
2877  ItemBlock& block = m_ItemBlocks[i];
2878  // This block has some free items: Use the first one.
2879  if(block.FirstFreeIndex != UINT32_MAX)
2880  {
2881  Item* const pItem = &block.pItems[block.FirstFreeIndex];
2882  block.FirstFreeIndex = pItem->NextFreeIndex;
2883  return &pItem->Value;
2884  }
2885  }
2886 
2887  // No block has a free item: Create a new one and use it.
2888  ItemBlock& newBlock = CreateNewBlock();
2889  Item* const pItem = &newBlock.pItems[0];
2890  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
2891  return &pItem->Value;
2892 }
2893 
2894 template<typename T>
2895 void VmaPoolAllocator<T>::Free(T* ptr)
2896 {
2897  // Search all memory blocks to find ptr.
2898  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
2899  {
2900  ItemBlock& block = m_ItemBlocks[i];
2901 
2902  // Casting to union.
2903  Item* pItemPtr;
2904  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
2905 
2906  // Check if pItemPtr is in address range of this block.
2907  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
2908  {
2909  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
2910  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
2911  block.FirstFreeIndex = index;
2912  return;
2913  }
2914  }
2915  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
2916 }
2917 
2918 template<typename T>
2919 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
2920 {
2921  ItemBlock newBlock = {
2922  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
2923 
2924  m_ItemBlocks.push_back(newBlock);
2925 
2926  // Set up a singly-linked list of all free items in this block.
2927  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
2928  newBlock.pItems[i].NextFreeIndex = i + 1;
2929  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
2930  return m_ItemBlocks.back();
2931 }
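// A minimal usage sketch (e.g. in a test function; VMA_NULL falls back to the
// system allocator, and the item type must be POD):
VmaPoolAllocator<uint32_t> pool(VMA_NULL, 128); // up to 128 items per block
uint32_t* pValue = pool.Alloc();
*pValue = 42;
pool.Free(pValue);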
2932 
2934 // class VmaRawList, VmaList
2935 
2936 #if VMA_USE_STL_LIST
2937 
2938 #define VmaList std::list
2939 
2940 #else // #if VMA_USE_STL_LIST
2941 
2942 template<typename T>
2943 struct VmaListItem
2944 {
2945  VmaListItem* pPrev;
2946  VmaListItem* pNext;
2947  T Value;
2948 };
2949 
2950 // Doubly linked list.
2951 template<typename T>
2952 class VmaRawList
2953 {
2954 public:
2955  typedef VmaListItem<T> ItemType;
2956 
2957  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
2958  ~VmaRawList();
2959  void Clear();
2960 
2961  size_t GetCount() const { return m_Count; }
2962  bool IsEmpty() const { return m_Count == 0; }
2963 
2964  ItemType* Front() { return m_pFront; }
2965  const ItemType* Front() const { return m_pFront; }
2966  ItemType* Back() { return m_pBack; }
2967  const ItemType* Back() const { return m_pBack; }
2968 
2969  ItemType* PushBack();
2970  ItemType* PushFront();
2971  ItemType* PushBack(const T& value);
2972  ItemType* PushFront(const T& value);
2973  void PopBack();
2974  void PopFront();
2975 
2976  // Item can be null - it means PushBack.
2977  ItemType* InsertBefore(ItemType* pItem);
2978  // Item can be null - it means PushFront.
2979  ItemType* InsertAfter(ItemType* pItem);
2980 
2981  ItemType* InsertBefore(ItemType* pItem, const T& value);
2982  ItemType* InsertAfter(ItemType* pItem, const T& value);
2983 
2984  void Remove(ItemType* pItem);
2985 
2986 private:
2987  const VkAllocationCallbacks* const m_pAllocationCallbacks;
2988  VmaPoolAllocator<ItemType> m_ItemAllocator;
2989  ItemType* m_pFront;
2990  ItemType* m_pBack;
2991  size_t m_Count;
2992 
2993  // Declared but not defined, to block the copy constructor and assignment operator.
2994  VmaRawList(const VmaRawList<T>& src);
2995  VmaRawList<T>& operator=(const VmaRawList<T>& rhs);
2996 };
2997 
2998 template<typename T>
2999 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
3000  m_pAllocationCallbacks(pAllocationCallbacks),
3001  m_ItemAllocator(pAllocationCallbacks, 128),
3002  m_pFront(VMA_NULL),
3003  m_pBack(VMA_NULL),
3004  m_Count(0)
3005 {
3006 }
3007 
3008 template<typename T>
3009 VmaRawList<T>::~VmaRawList()
3010 {
3011  // Intentionally not calling Clear, because that would require unnecessary
3012  // computation to return all items to m_ItemAllocator as free.
3013 }
3014 
3015 template<typename T>
3016 void VmaRawList<T>::Clear()
3017 {
3018  if(IsEmpty() == false)
3019  {
3020  ItemType* pItem = m_pBack;
3021  while(pItem != VMA_NULL)
3022  {
3023  ItemType* const pPrevItem = pItem->pPrev;
3024  m_ItemAllocator.Free(pItem);
3025  pItem = pPrevItem;
3026  }
3027  m_pFront = VMA_NULL;
3028  m_pBack = VMA_NULL;
3029  m_Count = 0;
3030  }
3031 }
3032 
3033 template<typename T>
3034 VmaListItem<T>* VmaRawList<T>::PushBack()
3035 {
3036  ItemType* const pNewItem = m_ItemAllocator.Alloc();
3037  pNewItem->pNext = VMA_NULL;
3038  if(IsEmpty())
3039  {
3040  pNewItem->pPrev = VMA_NULL;
3041  m_pFront = pNewItem;
3042  m_pBack = pNewItem;
3043  m_Count = 1;
3044  }
3045  else
3046  {
3047  pNewItem->pPrev = m_pBack;
3048  m_pBack->pNext = pNewItem;
3049  m_pBack = pNewItem;
3050  ++m_Count;
3051  }
3052  return pNewItem;
3053 }
3054 
3055 template<typename T>
3056 VmaListItem<T>* VmaRawList<T>::PushFront()
3057 {
3058  ItemType* const pNewItem = m_ItemAllocator.Alloc();
3059  pNewItem->pPrev = VMA_NULL;
3060  if(IsEmpty())
3061  {
3062  pNewItem->pNext = VMA_NULL;
3063  m_pFront = pNewItem;
3064  m_pBack = pNewItem;
3065  m_Count = 1;
3066  }
3067  else
3068  {
3069  pNewItem->pNext = m_pFront;
3070  m_pFront->pPrev = pNewItem;
3071  m_pFront = pNewItem;
3072  ++m_Count;
3073  }
3074  return pNewItem;
3075 }
3076 
3077 template<typename T>
3078 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
3079 {
3080  ItemType* const pNewItem = PushBack();
3081  pNewItem->Value = value;
3082  return pNewItem;
3083 }
3084 
3085 template<typename T>
3086 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
3087 {
3088  ItemType* const pNewItem = PushFront();
3089  pNewItem->Value = value;
3090  return pNewItem;
3091 }
3092 
3093 template<typename T>
3094 void VmaRawList<T>::PopBack()
3095 {
3096  VMA_HEAVY_ASSERT(m_Count > 0);
3097  ItemType* const pBackItem = m_pBack;
3098  ItemType* const pPrevItem = pBackItem->pPrev;
3099  if(pPrevItem != VMA_NULL)
3100  {
3101  pPrevItem->pNext = VMA_NULL;
3102  }
3103  m_pBack = pPrevItem;
3104  m_ItemAllocator.Free(pBackItem);
3105  --m_Count;
3106 }
3107 
3108 template<typename T>
3109 void VmaRawList<T>::PopFront()
3110 {
3111  VMA_HEAVY_ASSERT(m_Count > 0);
3112  ItemType* const pFrontItem = m_pFront;
3113  ItemType* const pNextItem = pFrontItem->pNext;
3114  if(pNextItem != VMA_NULL)
3115  {
3116  pNextItem->pPrev = VMA_NULL;
3117  }
3118  m_pFront = pNextItem;
3119  m_ItemAllocator.Free(pFrontItem);
3120  --m_Count;
3121 }
3122 
3123 template<typename T>
3124 void VmaRawList<T>::Remove(ItemType* pItem)
3125 {
3126  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
3127  VMA_HEAVY_ASSERT(m_Count > 0);
3128 
3129  if(pItem->pPrev != VMA_NULL)
3130  {
3131  pItem->pPrev->pNext = pItem->pNext;
3132  }
3133  else
3134  {
3135  VMA_HEAVY_ASSERT(m_pFront == pItem);
3136  m_pFront = pItem->pNext;
3137  }
3138 
3139  if(pItem->pNext != VMA_NULL)
3140  {
3141  pItem->pNext->pPrev = pItem->pPrev;
3142  }
3143  else
3144  {
3145  VMA_HEAVY_ASSERT(m_pBack == pItem);
3146  m_pBack = pItem->pPrev;
3147  }
3148 
3149  m_ItemAllocator.Free(pItem);
3150  --m_Count;
3151 }
3152 
3153 template<typename T>
3154 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
3155 {
3156  if(pItem != VMA_NULL)
3157  {
3158  ItemType* const prevItem = pItem->pPrev;
3159  ItemType* const newItem = m_ItemAllocator.Alloc();
3160  newItem->pPrev = prevItem;
3161  newItem->pNext = pItem;
3162  pItem->pPrev = newItem;
3163  if(prevItem != VMA_NULL)
3164  {
3165  prevItem->pNext = newItem;
3166  }
3167  else
3168  {
3169  VMA_HEAVY_ASSERT(m_pFront == pItem);
3170  m_pFront = newItem;
3171  }
3172  ++m_Count;
3173  return newItem;
3174  }
3175  else
3176  return PushBack();
3177 }
3178 
3179 template<typename T>
3180 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
3181 {
3182  if(pItem != VMA_NULL)
3183  {
3184  ItemType* const nextItem = pItem->pNext;
3185  ItemType* const newItem = m_ItemAllocator.Alloc();
3186  newItem->pNext = nextItem;
3187  newItem->pPrev = pItem;
3188  pItem->pNext = newItem;
3189  if(nextItem != VMA_NULL)
3190  {
3191  nextItem->pPrev = newItem;
3192  }
3193  else
3194  {
3195  VMA_HEAVY_ASSERT(m_pBack == pItem);
3196  m_pBack = newItem;
3197  }
3198  ++m_Count;
3199  return newItem;
3200  }
3201  else
3202  return PushFront();
3203 }
3204 
3205 template<typename T>
3206 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
3207 {
3208  ItemType* const newItem = InsertBefore(pItem);
3209  newItem->Value = value;
3210  return newItem;
3211 }
3212 
3213 template<typename T>
3214 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
3215 {
3216  ItemType* const newItem = InsertAfter(pItem);
3217  newItem->Value = value;
3218  return newItem;
3219 }
3220 
3221 template<typename T, typename AllocatorT>
3222 class VmaList
3223 {
3224 public:
3225  class iterator
3226  {
3227  public:
3228  iterator() :
3229  m_pList(VMA_NULL),
3230  m_pItem(VMA_NULL)
3231  {
3232  }
3233 
3234  T& operator*() const
3235  {
3236  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
3237  return m_pItem->Value;
3238  }
3239  T* operator->() const
3240  {
3241  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
3242  return &m_pItem->Value;
3243  }
3244 
3245  iterator& operator++()
3246  {
3247  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
3248  m_pItem = m_pItem->pNext;
3249  return *this;
3250  }
3251  iterator& operator--()
3252  {
3253  if(m_pItem != VMA_NULL)
3254  {
3255  m_pItem = m_pItem->pPrev;
3256  }
3257  else
3258  {
3259  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
3260  m_pItem = m_pList->Back();
3261  }
3262  return *this;
3263  }
3264 
3265  iterator operator++(int)
3266  {
3267  iterator result = *this;
3268  ++*this;
3269  return result;
3270  }
3271  iterator operator--(int)
3272  {
3273  iterator result = *this;
3274  --*this;
3275  return result;
3276  }
3277 
3278  bool operator==(const iterator& rhs) const
3279  {
3280  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
3281  return m_pItem == rhs.m_pItem;
3282  }
3283  bool operator!=(const iterator& rhs) const
3284  {
3285  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
3286  return m_pItem != rhs.m_pItem;
3287  }
3288 
3289  private:
3290  VmaRawList<T>* m_pList;
3291  VmaListItem<T>* m_pItem;
3292 
3293  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
3294  m_pList(pList),
3295  m_pItem(pItem)
3296  {
3297  }
3298 
3299  friend class VmaList<T, AllocatorT>;
3300  };
3301 
3302  class const_iterator
3303  {
3304  public:
3305  const_iterator() :
3306  m_pList(VMA_NULL),
3307  m_pItem(VMA_NULL)
3308  {
3309  }
3310 
3311  const_iterator(const iterator& src) :
3312  m_pList(src.m_pList),
3313  m_pItem(src.m_pItem)
3314  {
3315  }
3316 
3317  const T& operator*() const
3318  {
3319  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
3320  return m_pItem->Value;
3321  }
3322  const T* operator->() const
3323  {
3324  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
3325  return &m_pItem->Value;
3326  }
3327 
3328  const_iterator& operator++()
3329  {
3330  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
3331  m_pItem = m_pItem->pNext;
3332  return *this;
3333  }
3334  const_iterator& operator--()
3335  {
3336  if(m_pItem != VMA_NULL)
3337  {
3338  m_pItem = m_pItem->pPrev;
3339  }
3340  else
3341  {
3342  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
3343  m_pItem = m_pList->Back();
3344  }
3345  return *this;
3346  }
3347 
3348  const_iterator operator++(int)
3349  {
3350  const_iterator result = *this;
3351  ++*this;
3352  return result;
3353  }
3354  const_iterator operator--(int)
3355  {
3356  const_iterator result = *this;
3357  --*this;
3358  return result;
3359  }
3360 
3361  bool operator==(const const_iterator& rhs) const
3362  {
3363  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
3364  return m_pItem == rhs.m_pItem;
3365  }
3366  bool operator!=(const const_iterator& rhs) const
3367  {
3368  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
3369  return m_pItem != rhs.m_pItem;
3370  }
3371 
3372  private:
3373  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
3374  m_pList(pList),
3375  m_pItem(pItem)
3376  {
3377  }
3378 
3379  const VmaRawList<T>* m_pList;
3380  const VmaListItem<T>* m_pItem;
3381 
3382  friend class VmaList<T, AllocatorT>;
3383  };
3384 
3385  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
3386 
3387  bool empty() const { return m_RawList.IsEmpty(); }
3388  size_t size() const { return m_RawList.GetCount(); }
3389 
3390  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
3391  iterator end() { return iterator(&m_RawList, VMA_NULL); }
3392 
3393  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
3394  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
3395 
3396  void clear() { m_RawList.Clear(); }
3397  void push_back(const T& value) { m_RawList.PushBack(value); }
3398  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
3399  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
3400 
3401 private:
3402  VmaRawList<T> m_RawList;
3403 };
3404 
3405 #endif // #if VMA_USE_STL_LIST
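// A minimal usage sketch (e.g. in a test function) working with either variant,
// std::list or the custom VmaList:
VmaStlAllocator<uint32_t> alloc(VMA_NULL);
VmaList< uint32_t, VmaStlAllocator<uint32_t> > lst(alloc);
lst.push_back(1);
lst.push_back(2);
uint32_t sum = 0;
for(VmaList< uint32_t, VmaStlAllocator<uint32_t> >::iterator it = lst.begin(); it != lst.end(); ++it)
{
    sum += *it;
}
assert(sum == 3);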
3406 
3408 // class VmaMap
3409 
3410 // Unused in this version.
3411 #if 0
3412 
3413 #if VMA_USE_STL_UNORDERED_MAP
3414 
3415 #define VmaPair std::pair
3416 
3417 #define VMA_MAP_TYPE(KeyT, ValueT) \
3418  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
3419 
3420 #else // #if VMA_USE_STL_UNORDERED_MAP
3421 
3422 template<typename T1, typename T2>
3423 struct VmaPair
3424 {
3425  T1 first;
3426  T2 second;
3427 
3428  VmaPair() : first(), second() { }
3429  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
3430 };
3431 
3432 /* Class compatible with a subset of the interface of std::unordered_map.
3433 KeyT, ValueT must be POD because they will be stored in VmaVector.
3434 */
3435 template<typename KeyT, typename ValueT>
3436 class VmaMap
3437 {
3438 public:
3439  typedef VmaPair<KeyT, ValueT> PairType;
3440  typedef PairType* iterator;
3441 
3442  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
3443 
3444  iterator begin() { return m_Vector.begin(); }
3445  iterator end() { return m_Vector.end(); }
3446 
3447  void insert(const PairType& pair);
3448  iterator find(const KeyT& key);
3449  void erase(iterator it);
3450 
3451 private:
3452  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
3453 };
3454 
3455 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
3456 
3457 template<typename FirstT, typename SecondT>
3458 struct VmaPairFirstLess
3459 {
3460  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
3461  {
3462  return lhs.first < rhs.first;
3463  }
3464  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
3465  {
3466  return lhs.first < rhsFirst;
3467  }
3468 };
3469 
3470 template<typename KeyT, typename ValueT>
3471 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
3472 {
3473  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
3474  m_Vector.data(),
3475  m_Vector.data() + m_Vector.size(),
3476  pair,
3477  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
3478  VmaVectorInsert(m_Vector, indexToInsert, pair);
3479 }
3480 
3481 template<typename KeyT, typename ValueT>
3482 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
3483 {
3484  PairType* it = VmaBinaryFindFirstNotLess(
3485  m_Vector.data(),
3486  m_Vector.data() + m_Vector.size(),
3487  key,
3488  VmaPairFirstLess<KeyT, ValueT>());
3489  if((it != m_Vector.end()) && (it->first == key))
3490  {
3491  return it;
3492  }
3493  else
3494  {
3495  return m_Vector.end();
3496  }
3497 }
3498 
3499 template<typename KeyT, typename ValueT>
3500 void VmaMap<KeyT, ValueT>::erase(iterator it)
3501 {
3502  VmaVectorRemove(m_Vector, it - m_Vector.begin());
3503 }
3504 
3505 #endif // #if VMA_USE_STL_UNORDERED_MAP
3506 
3507 #endif // #if 0
3508 
3510 
3511 class VmaDeviceMemoryBlock;
3512 
3513 struct VmaAllocation_T
3514 {
3515 private:
3516  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
3517 
3518  enum FLAGS
3519  {
3520  FLAG_USER_DATA_STRING = 0x01,
3521  };
3522 
3523 public:
3524  enum ALLOCATION_TYPE
3525  {
3526  ALLOCATION_TYPE_NONE,
3527  ALLOCATION_TYPE_BLOCK,
3528  ALLOCATION_TYPE_DEDICATED,
3529  };
3530 
3531  VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
3532  m_Alignment(1),
3533  m_Size(0),
3534  m_pUserData(VMA_NULL),
3535  m_LastUseFrameIndex(currentFrameIndex),
3536  m_Type((uint8_t)ALLOCATION_TYPE_NONE),
3537  m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
3538  m_MapCount(0),
3539  m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
3540  {
3541  }
3542 
3543  ~VmaAllocation_T()
3544  {
3545  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
3546 
3547  // Check if owned string was freed.
3548  VMA_ASSERT(m_pUserData == VMA_NULL);
3549  }
3550 
3551  void InitBlockAllocation(
3552  VmaPool hPool,
3553  VmaDeviceMemoryBlock* block,
3554  VkDeviceSize offset,
3555  VkDeviceSize alignment,
3556  VkDeviceSize size,
3557  VmaSuballocationType suballocationType,
3558  bool mapped,
3559  bool canBecomeLost)
3560  {
3561  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
3562  VMA_ASSERT(block != VMA_NULL);
3563  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
3564  m_Alignment = alignment;
3565  m_Size = size;
3566  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
3567  m_SuballocationType = (uint8_t)suballocationType;
3568  m_BlockAllocation.m_hPool = hPool;
3569  m_BlockAllocation.m_Block = block;
3570  m_BlockAllocation.m_Offset = offset;
3571  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
3572  }
3573 
3574  void InitLost()
3575  {
3576  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
3577  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
3578  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
3579  m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
3580  m_BlockAllocation.m_Block = VMA_NULL;
3581  m_BlockAllocation.m_Offset = 0;
3582  m_BlockAllocation.m_CanBecomeLost = true;
3583  }
3584 
3585  void ChangeBlockAllocation(
3586  VmaAllocator hAllocator,
3587  VmaDeviceMemoryBlock* block,
3588  VkDeviceSize offset);
3589 
3590  // pMappedData not null means allocation is created with MAPPED flag.
3591  void InitDedicatedAllocation(
3592  uint32_t memoryTypeIndex,
3593  VkDeviceMemory hMemory,
3594  VmaSuballocationType suballocationType,
3595  void* pMappedData,
3596  VkDeviceSize size)
3597  {
3598  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
3599  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
3600  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
3601  m_Alignment = 0;
3602  m_Size = size;
3603  m_SuballocationType = (uint8_t)suballocationType;
3604  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
3605  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
3606  m_DedicatedAllocation.m_hMemory = hMemory;
3607  m_DedicatedAllocation.m_pMappedData = pMappedData;
3608  }
3609 
3610  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
3611  VkDeviceSize GetAlignment() const { return m_Alignment; }
3612  VkDeviceSize GetSize() const { return m_Size; }
3613  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
3614  void* GetUserData() const { return m_pUserData; }
3615  void SetUserData(VmaAllocator hAllocator, void* pUserData);
3616  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
3617 
3618  VmaDeviceMemoryBlock* GetBlock() const
3619  {
3620  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
3621  return m_BlockAllocation.m_Block;
3622  }
3623  VkDeviceSize GetOffset() const;
3624  VkDeviceMemory GetMemory() const;
3625  uint32_t GetMemoryTypeIndex() const;
3626  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
3627  void* GetMappedData() const;
3628  bool CanBecomeLost() const;
3629  VmaPool GetPool() const;
3630 
3631  uint32_t GetLastUseFrameIndex() const
3632  {
3633  return m_LastUseFrameIndex.load();
3634  }
3635  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
3636  {
3637  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
3638  }
3639  /*
3640  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
3641  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
3642  - Else, returns false.
3643 
3644  If hAllocation is already lost, assert - you should not call it then.
3645  If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
3646  */
3647  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
3648 
3649  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
3650  {
3651  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
3652  outInfo.blockCount = 1;
3653  outInfo.allocationCount = 1;
3654  outInfo.unusedRangeCount = 0;
3655  outInfo.usedBytes = m_Size;
3656  outInfo.unusedBytes = 0;
3657  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
3658  outInfo.unusedRangeSizeMin = UINT64_MAX;
3659  outInfo.unusedRangeSizeMax = 0;
3660  }
3661 
3662  void BlockAllocMap();
3663  void BlockAllocUnmap();
3664  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
3665  void DedicatedAllocUnmap(VmaAllocator hAllocator);
3666 
3667 private:
3668  VkDeviceSize m_Alignment;
3669  VkDeviceSize m_Size;
3670  void* m_pUserData;
3671  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
3672  uint8_t m_Type; // ALLOCATION_TYPE
3673  uint8_t m_SuballocationType; // VmaSuballocationType
3674  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
3675  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory() (see the worked example after this class).
3676  uint8_t m_MapCount;
3677  uint8_t m_Flags; // enum FLAGS
3678 
3679  // Allocation out of VmaDeviceMemoryBlock.
3680  struct BlockAllocation
3681  {
3682  VmaPool m_hPool; // Null if belongs to general memory.
3683  VmaDeviceMemoryBlock* m_Block;
3684  VkDeviceSize m_Offset;
3685  bool m_CanBecomeLost;
3686  };
3687 
3688  // Allocation for an object that has its own private VkDeviceMemory.
3689  struct DedicatedAllocation
3690  {
3691  uint32_t m_MemoryTypeIndex;
3692  VkDeviceMemory m_hMemory;
3693  void* m_pMappedData; // Not null means memory is mapped.
3694  };
3695 
3696  union
3697  {
3698  // Allocation out of VmaDeviceMemoryBlock.
3699  BlockAllocation m_BlockAllocation;
3700  // Allocation for an object that has its own private VkDeviceMemory.
3701  DedicatedAllocation m_DedicatedAllocation;
3702  };
3703 
3704  void FreeUserDataString(VmaAllocator hAllocator);
3705 };
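// Worked example of the m_MapCount encoding used above (hypothetical values,
// not part of the library): an allocation created with
// VMA_ALLOCATION_CREATE_MAPPED_BIT and then mapped twice via vmaMapMemory()
// has m_MapCount == (0x80 | 2) == 0x82; its mapping reference counter is
// m_MapCount & 0x7F == 2, and IsPersistentMap() returns true because bit
// 0x80 is set.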
3706 
3707 /*
3708 Represents a region of a VmaDeviceMemoryBlock that is either assigned to an
3709 allocation (used) or free.
3710 */
3711 struct VmaSuballocation
3712 {
3713  VkDeviceSize offset;
3714  VkDeviceSize size;
3715  VmaAllocation hAllocation;
3716  VmaSuballocationType type;
3717 };
3718 
3719 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
3720 
3721 // Cost of one additional allocation lost, as equivalent in bytes.
3722 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
3723 
3724 /*
3725 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
3726 
3727 If canMakeOtherLost was false:
3728 - item points to a FREE suballocation.
3729 - itemsToMakeLostCount is 0.
3730 
3731 If canMakeOtherLost was true:
3732 - item points to first of sequence of suballocations, which are either FREE,
3733  or point to VmaAllocations that can become lost.
3734 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
3735  the requested allocation to succeed.
3736 */
3737 struct VmaAllocationRequest
3738 {
3739  VkDeviceSize offset;
3740  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
3741  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
3742  VmaSuballocationList::iterator item;
3743  size_t itemsToMakeLostCount;
3744 
3745  VkDeviceSize CalcCost() const
3746  {
3747  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
3748  }
3749 };
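// Worked example for CalcCost() (hypothetical numbers): a request overlapping
// 1 MiB of allocations that would have to be made lost (sumItemSize == 1048576)
// with itemsToMakeLostCount == 2 costs 1048576 + 2 * VMA_LOST_ALLOCATION_COST
// == 3145728 bytes-equivalent; when several candidate requests exist, the one
// with the smallest cost is preferred.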
3750 
3751 /*
3752 Data structure used for bookkeeping of allocations and unused ranges of memory
3753 in a single VkDeviceMemory block.
3754 */
3755 class VmaBlockMetadata
3756 {
3757 public:
3758  VmaBlockMetadata(VmaAllocator hAllocator);
3759  ~VmaBlockMetadata();
3760  void Init(VkDeviceSize size);
3761 
3762  // Validates all data structures inside this object. If not valid, returns false.
3763  bool Validate() const;
3764  VkDeviceSize GetSize() const { return m_Size; }
3765  size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
3766  VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
3767  VkDeviceSize GetUnusedRangeSizeMax() const;
3768  // Returns true if this block is empty - contains only single free suballocation.
3769  bool IsEmpty() const;
3770 
3771  void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
3772  void AddPoolStats(VmaPoolStats& inoutStats) const;
3773 
3774 #if VMA_STATS_STRING_ENABLED
3775  void PrintDetailedMap(class VmaJsonWriter& json) const;
3776 #endif
3777 
3778  // Creates trivial request for case when block is empty.
3779  void CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest);
3780 
3781  // Tries to find a place for suballocation with given parameters inside this block.
3782  // If succeeded, fills pAllocationRequest and returns true.
3783  // If failed, returns false.
3784  bool CreateAllocationRequest(
3785  uint32_t currentFrameIndex,
3786  uint32_t frameInUseCount,
3787  VkDeviceSize bufferImageGranularity,
3788  VkDeviceSize allocSize,
3789  VkDeviceSize allocAlignment,
3790  VmaSuballocationType allocType,
3791  bool canMakeOtherLost,
3792  VmaAllocationRequest* pAllocationRequest);
3793 
3794  bool MakeRequestedAllocationsLost(
3795  uint32_t currentFrameIndex,
3796  uint32_t frameInUseCount,
3797  VmaAllocationRequest* pAllocationRequest);
3798 
3799  uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
3800 
3801  // Makes actual allocation based on request. Request must already be checked and valid.
3802  void Alloc(
3803  const VmaAllocationRequest& request,
3804  VmaSuballocationType type,
3805  VkDeviceSize allocSize,
3806  VmaAllocation hAllocation);
3807 
3808  // Frees suballocation assigned to given memory region.
3809  void Free(const VmaAllocation allocation);
3810  void FreeAtOffset(VkDeviceSize offset);
3811 
3812 private:
3813  VkDeviceSize m_Size;
3814  uint32_t m_FreeCount;
3815  VkDeviceSize m_SumFreeSize;
3816  VmaSuballocationList m_Suballocations;
3817  // Suballocations that are free and have size greater than certain threshold.
3818  // Sorted by size, ascending.
3819  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
3820 
3821  bool ValidateFreeSuballocationList() const;
3822 
3823  // Checks if a requested suballocation with the given parameters can be placed at the given suballocItem.
3824  // If yes, fills pOffset and returns true. If no, returns false.
3825  bool CheckAllocation(
3826  uint32_t currentFrameIndex,
3827  uint32_t frameInUseCount,
3828  VkDeviceSize bufferImageGranularity,
3829  VkDeviceSize allocSize,
3830  VkDeviceSize allocAlignment,
3831  VmaSuballocationType allocType,
3832  VmaSuballocationList::const_iterator suballocItem,
3833  bool canMakeOtherLost,
3834  VkDeviceSize* pOffset,
3835  size_t* itemsToMakeLostCount,
3836  VkDeviceSize* pSumFreeSize,
3837  VkDeviceSize* pSumItemSize) const;
3838  // Merges the given free suballocation with the following one, which must also be free.
3839  void MergeFreeWithNext(VmaSuballocationList::iterator item);
3840  // Releases given suballocation, making it free.
3841  // Merges it with adjacent free suballocations if applicable.
3842  // Returns iterator to new free suballocation at this place.
3843  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
3844  // Inserts the given free suballocation into the sorted list
3845  // m_FreeSuballocationsBySize, if it is suitable.
3846  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
3847  // Removes the given free suballocation from the sorted list
3848  // m_FreeSuballocationsBySize, if it was registered there.
3849  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
3850 };
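// Usage sketch for VmaBlockMetadata (illustrative only - the real call sites
// live in VmaBlockVector; hAllocator, blockSize, allocSize, allocAlignment,
// allocType, hAllocation and the frame parameters are hypothetical):
//
// VmaBlockMetadata metadata(hAllocator);
// metadata.Init(blockSize);
// VmaAllocationRequest request;
// if(metadata.CreateAllocationRequest(currentFrameIndex, frameInUseCount,
//     bufferImageGranularity, allocSize, allocAlignment, allocType,
//     false, // canMakeOtherLost
//     &request))
// {
//     metadata.Alloc(request, allocType, allocSize, hAllocation);
// }
// // ...
// metadata.Free(hAllocation);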
3851 
3852 // Helper class that represents mapped memory. Synchronized internally.
3853 class VmaDeviceMemoryMapping
3854 {
3855 public:
3856  VmaDeviceMemoryMapping();
3857  ~VmaDeviceMemoryMapping();
3858 
3859  void* GetMappedData() const { return m_pMappedData; }
3860 
3861  // ppData can be null.
3862  VkResult Map(VmaAllocator hAllocator, VkDeviceMemory hMemory, uint32_t count, void **ppData);
3863  void Unmap(VmaAllocator hAllocator, VkDeviceMemory hMemory, uint32_t count);
3864 
3865 private:
3866  VMA_MUTEX m_Mutex;
3867  uint32_t m_MapCount;
3868  void* m_pMappedData;
3869 };
3870 
3871 /*
3872 Represents a single block of device memory (`VkDeviceMemory`) with all the
3873 data about its regions (aka suballocations, `VmaAllocation`), assigned and free.
3874 
3875 Thread-safety: This class must be externally synchronized.
3876 */
3877 class VmaDeviceMemoryBlock
3878 {
3879 public:
3880  uint32_t m_MemoryTypeIndex;
3881  VkDeviceMemory m_hMemory;
3882  VmaDeviceMemoryMapping m_Mapping;
3883  VmaBlockMetadata m_Metadata;
3884 
3885  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
3886 
3887  ~VmaDeviceMemoryBlock()
3888  {
3889  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
3890  }
3891 
3892  // Always call after construction.
3893  void Init(
3894  uint32_t newMemoryTypeIndex,
3895  VkDeviceMemory newMemory,
3896  VkDeviceSize newSize);
3897  // Always call before destruction.
3898  void Destroy(VmaAllocator allocator);
3899 
3900  // Validates all data structures inside this object. If not valid, returns false.
3901  bool Validate() const;
3902 
3903  // ppData can be null.
3904  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
3905  void Unmap(VmaAllocator hAllocator, uint32_t count);
3906 };
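// Lifecycle sketch for VmaDeviceMemoryBlock (illustrative; newMemory is
// assumed to come from a successful vkAllocateMemory and all other variables
// are hypothetical):
//
// VmaDeviceMemoryBlock block(hAllocator);
// block.Init(memoryTypeIndex, newMemory, blockSize); // always after construction
// void* pData = VMA_NULL;
// if(block.Map(hAllocator, 1, &pData) == VK_SUCCESS)
// {
//     // ... use pData ...
//     block.Unmap(hAllocator, 1);
// }
// block.Destroy(hAllocator); // always before destruction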
3907 
3908 struct VmaPointerLess
3909 {
3910  bool operator()(const void* lhs, const void* rhs) const
3911  {
3912  return lhs < rhs;
3913  }
3914 };
3915 
3916 class VmaDefragmentator;
3917 
3918 /*
3919 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
3920 Vulkan memory type.
3921 
3922 Synchronized internally with a mutex.
3923 */
3924 struct VmaBlockVector
3925 {
3926  VmaBlockVector(
3927  VmaAllocator hAllocator,
3928  uint32_t memoryTypeIndex,
3929  VkDeviceSize preferredBlockSize,
3930  size_t minBlockCount,
3931  size_t maxBlockCount,
3932  VkDeviceSize bufferImageGranularity,
3933  uint32_t frameInUseCount,
3934  bool isCustomPool);
3935  ~VmaBlockVector();
3936 
3937  VkResult CreateMinBlocks();
3938 
3939  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
3940  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
3941  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
3942  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
3943 
3944  void GetPoolStats(VmaPoolStats* pStats);
3945 
3946  bool IsEmpty() const { return m_Blocks.empty(); }
3947 
3948  VkResult Allocate(
3949  VmaPool hCurrentPool,
3950  uint32_t currentFrameIndex,
3951  const VkMemoryRequirements& vkMemReq,
3952  const VmaAllocationCreateInfo& createInfo,
3953  VmaSuballocationType suballocType,
3954  VmaAllocation* pAllocation);
3955 
3956  void Free(
3957  VmaAllocation hAllocation);
3958 
3959  // Adds statistics of this BlockVector to pStats.
3960  void AddStats(VmaStats* pStats);
3961 
3962 #if VMA_STATS_STRING_ENABLED
3963  void PrintDetailedMap(class VmaJsonWriter& json);
3964 #endif
3965 
3966  void MakePoolAllocationsLost(
3967  uint32_t currentFrameIndex,
3968  size_t* pLostAllocationCount);
3969 
3970  VmaDefragmentator* EnsureDefragmentator(
3971  VmaAllocator hAllocator,
3972  uint32_t currentFrameIndex);
3973 
3974  VkResult Defragment(
3975  VmaDefragmentationStats* pDefragmentationStats,
3976  VkDeviceSize& maxBytesToMove,
3977  uint32_t& maxAllocationsToMove);
3978 
3979  void DestroyDefragmentator();
3980 
3981 private:
3982  friend class VmaDefragmentator;
3983 
3984  const VmaAllocator m_hAllocator;
3985  const uint32_t m_MemoryTypeIndex;
3986  const VkDeviceSize m_PreferredBlockSize;
3987  const size_t m_MinBlockCount;
3988  const size_t m_MaxBlockCount;
3989  const VkDeviceSize m_BufferImageGranularity;
3990  const uint32_t m_FrameInUseCount;
3991  const bool m_IsCustomPool;
3992  VMA_MUTEX m_Mutex;
3993  // Incrementally sorted by sumFreeSize, ascending.
3994  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
3995  /* There can be at most one block that is completely empty - a
3996  hysteresis to avoid the pessimistic case of alternating creation and
3997  destruction of a VkDeviceMemory. */
3998  bool m_HasEmptyBlock;
3999  VmaDefragmentator* m_pDefragmentator;
4000 
4001  size_t CalcMaxBlockSize() const;
4002 
4003  // Finds and removes given block from vector.
4004  void Remove(VmaDeviceMemoryBlock* pBlock);
4005 
4006  // Performs single step in sorting m_Blocks. They may not be fully sorted
4007  // after this call.
4008  void IncrementallySortBlocks();
4009 
4010  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
4011 };
4012 
4013 struct VmaPool_T
4014 {
4015 public:
4016  VmaBlockVector m_BlockVector;
4017 
4018  // Takes ownership.
4019  VmaPool_T(
4020  VmaAllocator hAllocator,
4021  const VmaPoolCreateInfo& createInfo);
4022  ~VmaPool_T();
4023 
4024  VmaBlockVector& GetBlockVector() { return m_BlockVector; }
4025 
4026 #if VMA_STATS_STRING_ENABLED
4027  //void PrintDetailedMap(class VmaStringBuilder& sb);
4028 #endif
4029 };
4030 
4031 class VmaDefragmentator
4032 {
4033  const VmaAllocator m_hAllocator;
4034  VmaBlockVector* const m_pBlockVector;
4035  uint32_t m_CurrentFrameIndex;
4036  VkDeviceSize m_BytesMoved;
4037  uint32_t m_AllocationsMoved;
4038 
4039  struct AllocationInfo
4040  {
4041  VmaAllocation m_hAllocation;
4042  VkBool32* m_pChanged;
4043 
4044  AllocationInfo() :
4045  m_hAllocation(VK_NULL_HANDLE),
4046  m_pChanged(VMA_NULL)
4047  {
4048  }
4049  };
4050 
4051  struct AllocationInfoSizeGreater
4052  {
4053  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
4054  {
4055  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
4056  }
4057  };
4058 
4059  // Used between AddAllocation and Defragment.
4060  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
4061 
4062  struct BlockInfo
4063  {
4064  VmaDeviceMemoryBlock* m_pBlock;
4065  bool m_HasNonMovableAllocations;
4066  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
4067 
4068  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
4069  m_pBlock(VMA_NULL),
4070  m_HasNonMovableAllocations(true),
4071  m_Allocations(pAllocationCallbacks),
4072  m_pMappedDataForDefragmentation(VMA_NULL)
4073  {
4074  }
4075 
4076  void CalcHasNonMovableAllocations()
4077  {
4078  const size_t blockAllocCount = m_pBlock->m_Metadata.GetAllocationCount();
4079  const size_t defragmentAllocCount = m_Allocations.size();
4080  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
4081  }
4082 
4083  void SortAllocationsBySizeDescecnding()
4084  {
4085  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
4086  }
4087 
4088  VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
4089  void Unmap(VmaAllocator hAllocator);
4090 
4091  private:
4092  // Not null if mapped for defragmentation only, not originally mapped.
4093  void* m_pMappedDataForDefragmentation;
4094  };
4095 
4096  struct BlockPointerLess
4097  {
4098  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
4099  {
4100  return pLhsBlockInfo->m_pBlock < pRhsBlock;
4101  }
4102  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
4103  {
4104  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
4105  }
4106  };
4107 
4108  // 1. Blocks with some non-movable allocations go first.
4109  // 2. Blocks with smaller sumFreeSize go first.
4110  struct BlockInfoCompareMoveDestination
4111  {
4112  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
4113  {
4114  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
4115  {
4116  return true;
4117  }
4118  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
4119  {
4120  return false;
4121  }
4122  if(pLhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize())
4123  {
4124  return true;
4125  }
4126  return false;
4127  }
4128  };
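 // Ordering example for the comparator above (hypothetical blocks): a block
 // with non-movable allocations and 4 MiB free precedes a fully movable
 // block with 1 MiB free, which in turn precedes a fully movable block with
 // 4 MiB free - so among purely movable candidates the tightest destination
 // is filled first.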
4129 
4130  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
4131  BlockInfoVector m_Blocks;
4132 
4133  VkResult DefragmentRound(
4134  VkDeviceSize maxBytesToMove,
4135  uint32_t maxAllocationsToMove);
4136 
4137  static bool MoveMakesSense(
4138  size_t dstBlockIndex, VkDeviceSize dstOffset,
4139  size_t srcBlockIndex, VkDeviceSize srcOffset);
4140 
4141 public:
4142  VmaDefragmentator(
4143  VmaAllocator hAllocator,
4144  VmaBlockVector* pBlockVector,
4145  uint32_t currentFrameIndex);
4146 
4147  ~VmaDefragmentator();
4148 
4149  VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
4150  uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
4151 
4152  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
4153 
4154  VkResult Defragment(
4155  VkDeviceSize maxBytesToMove,
4156  uint32_t maxAllocationsToMove);
4157 };
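// Usage sketch for VmaDefragmentator (illustrative; in the library it is
// created via VmaBlockVector::EnsureDefragmentator and destroyed via
// VmaBlockVector::DestroyDefragmentator; the variables are hypothetical):
//
// VmaDefragmentator* pDefragmentator =
//     pBlockVector->EnsureDefragmentator(hAllocator, currentFrameIndex);
// VkBool32 allocChanged = VK_FALSE;
// pDefragmentator->AddAllocation(hAlloc, &allocChanged);
// VkResult res = pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);
// VkDeviceSize bytesMoved = pDefragmentator->GetBytesMoved();
// pBlockVector->DestroyDefragmentator();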
4158 
4159 // Main allocator object.
4160 struct VmaAllocator_T
4161 {
4162  bool m_UseMutex;
4163  bool m_UseKhrDedicatedAllocation;
4164  VkDevice m_hDevice;
4165  bool m_AllocationCallbacksSpecified;
4166  VkAllocationCallbacks m_AllocationCallbacks;
4167  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
4168 
4169  // Number of bytes free out of the limit, or VK_WHOLE_SIZE if there is no limit for that heap.
4170  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
4171  VMA_MUTEX m_HeapSizeLimitMutex;
4172 
4173  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
4174  VkPhysicalDeviceMemoryProperties m_MemProps;
4175 
4176  // Default pools.
4177  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
4178 
4179  // Each vector is sorted by memory (handle value).
4180  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
4181  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
4182  VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
4183 
4184  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
4185  ~VmaAllocator_T();
4186 
4187  const VkAllocationCallbacks* GetAllocationCallbacks() const
4188  {
4189  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
4190  }
4191  const VmaVulkanFunctions& GetVulkanFunctions() const
4192  {
4193  return m_VulkanFunctions;
4194  }
4195 
4196  VkDeviceSize GetBufferImageGranularity() const
4197  {
4198  return VMA_MAX(
4199  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
4200  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
4201  }
4202 
4203  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
4204  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
4205 
4206  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
4207  {
4208  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
4209  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
4210  }
4211 
4212  void GetBufferMemoryRequirements(
4213  VkBuffer hBuffer,
4214  VkMemoryRequirements& memReq,
4215  bool& requiresDedicatedAllocation,
4216  bool& prefersDedicatedAllocation) const;
4217  void GetImageMemoryRequirements(
4218  VkImage hImage,
4219  VkMemoryRequirements& memReq,
4220  bool& requiresDedicatedAllocation,
4221  bool& prefersDedicatedAllocation) const;
4222 
4223  // Main allocation function.
4224  VkResult AllocateMemory(
4225  const VkMemoryRequirements& vkMemReq,
4226  bool requiresDedicatedAllocation,
4227  bool prefersDedicatedAllocation,
4228  VkBuffer dedicatedBuffer,
4229  VkImage dedicatedImage,
4230  const VmaAllocationCreateInfo& createInfo,
4231  VmaSuballocationType suballocType,
4232  VmaAllocation* pAllocation);
4233 
4234  // Main deallocation function.
4235  void FreeMemory(const VmaAllocation allocation);
4236 
4237  void CalculateStats(VmaStats* pStats);
4238 
4239 #if VMA_STATS_STRING_ENABLED
4240  void PrintDetailedMap(class VmaJsonWriter& json);
4241 #endif
4242 
4243  VkResult Defragment(
4244  VmaAllocation* pAllocations,
4245  size_t allocationCount,
4246  VkBool32* pAllocationsChanged,
4247  const VmaDefragmentationInfo* pDefragmentationInfo,
4248  VmaDefragmentationStats* pDefragmentationStats);
4249 
4250  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
4251  bool TouchAllocation(VmaAllocation hAllocation);
4252 
4253  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
4254  void DestroyPool(VmaPool pool);
4255  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
4256 
4257  void SetCurrentFrameIndex(uint32_t frameIndex);
4258 
4259  void MakePoolAllocationsLost(
4260  VmaPool hPool,
4261  size_t* pLostAllocationCount);
4262 
4263  void CreateLostAllocation(VmaAllocation* pAllocation);
4264 
4265  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
4266  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
4267 
4268  VkResult Map(VmaAllocation hAllocation, void** ppData);
4269  void Unmap(VmaAllocation hAllocation);
4270 
4271 private:
4272  VkDeviceSize m_PreferredLargeHeapBlockSize;
4273 
4274  VkPhysicalDevice m_PhysicalDevice;
4275  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
4276 
4277  VMA_MUTEX m_PoolsMutex;
4278  // Protected by m_PoolsMutex. Sorted by pointer value.
4279  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
4280 
4281  VmaVulkanFunctions m_VulkanFunctions;
4282 
4283  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
4284 
4285  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
4286 
4287  VkResult AllocateMemoryOfType(
4288  const VkMemoryRequirements& vkMemReq,
4289  bool dedicatedAllocation,
4290  VkBuffer dedicatedBuffer,
4291  VkImage dedicatedImage,
4292  const VmaAllocationCreateInfo& createInfo,
4293  uint32_t memTypeIndex,
4294  VmaSuballocationType suballocType,
4295  VmaAllocation* pAllocation);
4296 
4297  // Allocates and registers new VkDeviceMemory specifically for single allocation.
4298  VkResult AllocateDedicatedMemory(
4299  VkDeviceSize size,
4300  VmaSuballocationType suballocType,
4301  uint32_t memTypeIndex,
4302  bool map,
4303  bool isUserDataString,
4304  void* pUserData,
4305  VkBuffer dedicatedBuffer,
4306  VkImage dedicatedImage,
4307  VmaAllocation* pAllocation);
4308 
4309  // Frees the given allocation as dedicated memory and unregisters it.
4310  void FreeDedicatedMemory(VmaAllocation allocation);
4311 };
4312 
4314 // Memory allocation #2 after VmaAllocator_T definition
4315 
4316 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
4317 {
4318  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
4319 }
4320 
4321 static void VmaFree(VmaAllocator hAllocator, void* ptr)
4322 {
4323  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
4324 }
4325 
4326 template<typename T>
4327 static T* VmaAllocate(VmaAllocator hAllocator)
4328 {
4329  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
4330 }
4331 
4332 template<typename T>
4333 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
4334 {
4335  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
4336 }
4337 
4338 template<typename T>
4339 static void vma_delete(VmaAllocator hAllocator, T* ptr)
4340 {
4341  if(ptr != VMA_NULL)
4342  {
4343  ptr->~T();
4344  VmaFree(hAllocator, ptr);
4345  }
4346 }
4347 
4348 template<typename T>
4349 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
4350 {
4351  if(ptr != VMA_NULL)
4352  {
4353  for(size_t i = count; i--; )
4354  ptr[i].~T();
4355  VmaFree(hAllocator, ptr);
4356  }
4357 }
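// Usage sketch for these helpers (illustrative; hAllocator is hypothetical):
//
// uint32_t* pArray = VmaAllocateArray<uint32_t>(hAllocator, 16); // raw storage
// // ... fill pArray[0..15] ...
// vma_delete_array(hAllocator, pArray, 16); // runs destructors, then frees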
4358 
4360 // VmaStringBuilder
4361 
4362 #if VMA_STATS_STRING_ENABLED
4363 
4364 class VmaStringBuilder
4365 {
4366 public:
4367  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
4368  size_t GetLength() const { return m_Data.size(); }
4369  const char* GetData() const { return m_Data.data(); }
4370 
4371  void Add(char ch) { m_Data.push_back(ch); }
4372  void Add(const char* pStr);
4373  void AddNewLine() { Add('\n'); }
4374  void AddNumber(uint32_t num);
4375  void AddNumber(uint64_t num);
4376  void AddPointer(const void* ptr);
4377 
4378 private:
4379  VmaVector< char, VmaStlAllocator<char> > m_Data;
4380 };
4381 
4382 void VmaStringBuilder::Add(const char* pStr)
4383 {
4384  const size_t strLen = strlen(pStr);
4385  if(strLen > 0)
4386  {
4387  const size_t oldCount = m_Data.size();
4388  m_Data.resize(oldCount + strLen);
4389  memcpy(m_Data.data() + oldCount, pStr, strLen);
4390  }
4391 }
4392 
4393 void VmaStringBuilder::AddNumber(uint32_t num)
4394 {
4395  char buf[11];
4396  VmaUint32ToStr(buf, sizeof(buf), num);
4397  Add(buf);
4398 }
4399 
4400 void VmaStringBuilder::AddNumber(uint64_t num)
4401 {
4402  char buf[21];
4403  VmaUint64ToStr(buf, sizeof(buf), num);
4404  Add(buf);
4405 }
4406 
4407 void VmaStringBuilder::AddPointer(const void* ptr)
4408 {
4409  char buf[21];
4410  VmaPtrToStr(buf, sizeof(buf), ptr);
4411  Add(buf);
4412 }
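// Usage sketch for VmaStringBuilder (illustrative values; hAllocator is
// hypothetical):
//
// VmaStringBuilder sb(hAllocator);
// sb.Add("Allocations: ");
// sb.AddNumber(42u); // uint32_t overload
// sb.AddNewLine();
// // sb.GetData() now points to "Allocations: 42\n" (GetLength() characters,
// // not null-terminated).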
4413 
4414 #endif // #if VMA_STATS_STRING_ENABLED
4415 
4417 // VmaJsonWriter
4418 
4419 #if VMA_STATS_STRING_ENABLED
4420 
4421 class VmaJsonWriter
4422 {
4423 public:
4424  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
4425  ~VmaJsonWriter();
4426 
4427  void BeginObject(bool singleLine = false);
4428  void EndObject();
4429 
4430  void BeginArray(bool singleLine = false);
4431  void EndArray();
4432 
4433  void WriteString(const char* pStr);
4434  void BeginString(const char* pStr = VMA_NULL);
4435  void ContinueString(const char* pStr);
4436  void ContinueString(uint32_t n);
4437  void ContinueString(uint64_t n);
4438  void ContinueString_Pointer(const void* ptr);
4439  void EndString(const char* pStr = VMA_NULL);
4440 
4441  void WriteNumber(uint32_t n);
4442  void WriteNumber(uint64_t n);
4443  void WriteBool(bool b);
4444  void WriteNull();
4445 
4446 private:
4447  static const char* const INDENT;
4448 
4449  enum COLLECTION_TYPE
4450  {
4451  COLLECTION_TYPE_OBJECT,
4452  COLLECTION_TYPE_ARRAY,
4453  };
4454  struct StackItem
4455  {
4456  COLLECTION_TYPE type;
4457  uint32_t valueCount;
4458  bool singleLineMode;
4459  };
4460 
4461  VmaStringBuilder& m_SB;
4462  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
4463  bool m_InsideString;
4464 
4465  void BeginValue(bool isString);
4466  void WriteIndent(bool oneLess = false);
4467 };
4468 
4469 const char* const VmaJsonWriter::INDENT = " ";
4470 
4471 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
4472  m_SB(sb),
4473  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
4474  m_InsideString(false)
4475 {
4476 }
4477 
4478 VmaJsonWriter::~VmaJsonWriter()
4479 {
4480  VMA_ASSERT(!m_InsideString);
4481  VMA_ASSERT(m_Stack.empty());
4482 }
4483 
4484 void VmaJsonWriter::BeginObject(bool singleLine)
4485 {
4486  VMA_ASSERT(!m_InsideString);
4487 
4488  BeginValue(false);
4489  m_SB.Add('{');
4490 
4491  StackItem item;
4492  item.type = COLLECTION_TYPE_OBJECT;
4493  item.valueCount = 0;
4494  item.singleLineMode = singleLine;
4495  m_Stack.push_back(item);
4496 }
4497 
4498 void VmaJsonWriter::EndObject()
4499 {
4500  VMA_ASSERT(!m_InsideString);
4501 
4502  WriteIndent(true);
4503  m_SB.Add('}');
4504 
4505  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
4506  m_Stack.pop_back();
4507 }
4508 
4509 void VmaJsonWriter::BeginArray(bool singleLine)
4510 {
4511  VMA_ASSERT(!m_InsideString);
4512 
4513  BeginValue(false);
4514  m_SB.Add('[');
4515 
4516  StackItem item;
4517  item.type = COLLECTION_TYPE_ARRAY;
4518  item.valueCount = 0;
4519  item.singleLineMode = singleLine;
4520  m_Stack.push_back(item);
4521 }
4522 
4523 void VmaJsonWriter::EndArray()
4524 {
4525  VMA_ASSERT(!m_InsideString);
4526 
4527  WriteIndent(true);
4528  m_SB.Add(']');
4529 
4530  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
4531  m_Stack.pop_back();
4532 }
4533 
4534 void VmaJsonWriter::WriteString(const char* pStr)
4535 {
4536  BeginString(pStr);
4537  EndString();
4538 }
4539 
4540 void VmaJsonWriter::BeginString(const char* pStr)
4541 {
4542  VMA_ASSERT(!m_InsideString);
4543 
4544  BeginValue(true);
4545  m_SB.Add('"');
4546  m_InsideString = true;
4547  if(pStr != VMA_NULL && pStr[0] != '\0')
4548  {
4549  ContinueString(pStr);
4550  }
4551 }
4552 
4553 void VmaJsonWriter::ContinueString(const char* pStr)
4554 {
4555  VMA_ASSERT(m_InsideString);
4556 
4557  const size_t strLen = strlen(pStr);
4558  for(size_t i = 0; i < strLen; ++i)
4559  {
4560  char ch = pStr[i];
4561  if(ch == '\\')
4562  {
4563  m_SB.Add("\\\\");
4564  }
4565  else if(ch == '"')
4566  {
4567  m_SB.Add("\\\"");
4568  }
4569  else if(ch >= 32)
4570  {
4571  m_SB.Add(ch);
4572  }
4573  else switch(ch)
4574  {
4575  case '\b':
4576  m_SB.Add("\\b");
4577  break;
4578  case '\f':
4579  m_SB.Add("\\f");
4580  break;
4581  case '\n':
4582  m_SB.Add("\\n");
4583  break;
4584  case '\r':
4585  m_SB.Add("\\r");
4586  break;
4587  case '\t':
4588  m_SB.Add("\\t");
4589  break;
4590  default:
4591  VMA_ASSERT(0 && "Character not currently supported.");
4592  break;
4593  }
4594  }
4595 }
4596 
4597 void VmaJsonWriter::ContinueString(uint32_t n)
4598 {
4599  VMA_ASSERT(m_InsideString);
4600  m_SB.AddNumber(n);
4601 }
4602 
4603 void VmaJsonWriter::ContinueString(uint64_t n)
4604 {
4605  VMA_ASSERT(m_InsideString);
4606  m_SB.AddNumber(n);
4607 }
4608 
4609 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
4610 {
4611  VMA_ASSERT(m_InsideString);
4612  m_SB.AddPointer(ptr);
4613 }
4614 
4615 void VmaJsonWriter::EndString(const char* pStr)
4616 {
4617  VMA_ASSERT(m_InsideString);
4618  if(pStr != VMA_NULL && pStr[0] != '\0')
4619  {
4620  ContinueString(pStr);
4621  }
4622  m_SB.Add('"');
4623  m_InsideString = false;
4624 }
4625 
4626 void VmaJsonWriter::WriteNumber(uint32_t n)
4627 {
4628  VMA_ASSERT(!m_InsideString);
4629  BeginValue(false);
4630  m_SB.AddNumber(n);
4631 }
4632 
4633 void VmaJsonWriter::WriteNumber(uint64_t n)
4634 {
4635  VMA_ASSERT(!m_InsideString);
4636  BeginValue(false);
4637  m_SB.AddNumber(n);
4638 }
4639 
4640 void VmaJsonWriter::WriteBool(bool b)
4641 {
4642  VMA_ASSERT(!m_InsideString);
4643  BeginValue(false);
4644  m_SB.Add(b ? "true" : "false");
4645 }
4646 
4647 void VmaJsonWriter::WriteNull()
4648 {
4649  VMA_ASSERT(!m_InsideString);
4650  BeginValue(false);
4651  m_SB.Add("null");
4652 }
4653 
4654 void VmaJsonWriter::BeginValue(bool isString)
4655 {
4656  if(!m_Stack.empty())
4657  {
4658  StackItem& currItem = m_Stack.back();
4659  if(currItem.type == COLLECTION_TYPE_OBJECT &&
4660  currItem.valueCount % 2 == 0)
4661  {
4662  VMA_ASSERT(isString);
4663  }
4664 
4665  if(currItem.type == COLLECTION_TYPE_OBJECT &&
4666  currItem.valueCount % 2 != 0)
4667  {
4668  m_SB.Add(": ");
4669  }
4670  else if(currItem.valueCount > 0)
4671  {
4672  m_SB.Add(", ");
4673  WriteIndent();
4674  }
4675  else
4676  {
4677  WriteIndent();
4678  }
4679  ++currItem.valueCount;
4680  }
4681 }
4682 
4683 void VmaJsonWriter::WriteIndent(bool oneLess)
4684 {
4685  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
4686  {
4687  m_SB.AddNewLine();
4688 
4689  size_t count = m_Stack.size();
4690  if(count > 0 && oneLess)
4691  {
4692  --count;
4693  }
4694  for(size_t i = 0; i < count; ++i)
4695  {
4696  m_SB.Add(INDENT);
4697  }
4698  }
4699 }
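// Usage sketch for VmaJsonWriter (illustrative): inside an object, written
// values must alternate between names (strings) and values, which
// BeginValue() asserts.
//
// VmaJsonWriter json(pAllocationCallbacks, sb); // both hypothetical
// json.BeginObject();
// json.WriteString("Count"); // name
// json.WriteNumber(2u);      // value
// json.EndObject();
// // Produces, including newlines and indentation: { "Count": 2 }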
4700 
4701 #endif // #if VMA_STATS_STRING_ENABLED
4702 
4704 
4705 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
4706 {
4707  if(IsUserDataString())
4708  {
4709  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
4710 
4711  FreeUserDataString(hAllocator);
4712 
4713  if(pUserData != VMA_NULL)
4714  {
4715  const char* const newStrSrc = (char*)pUserData;
4716  const size_t newStrLen = strlen(newStrSrc);
4717  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
4718  memcpy(newStrDst, newStrSrc, newStrLen + 1);
4719  m_pUserData = newStrDst;
4720  }
4721  }
4722  else
4723  {
4724  m_pUserData = pUserData;
4725  }
4726 }
4727 
4728 void VmaAllocation_T::ChangeBlockAllocation(
4729  VmaAllocator hAllocator,
4730  VmaDeviceMemoryBlock* block,
4731  VkDeviceSize offset)
4732 {
4733  VMA_ASSERT(block != VMA_NULL);
4734  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
4735 
4736  // Move mapping reference counter from old block to new block.
4737  if(block != m_BlockAllocation.m_Block)
4738  {
4739  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
4740  if(IsPersistentMap())
4741  ++mapRefCount;
4742  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
4743  block->Map(hAllocator, mapRefCount, VMA_NULL);
4744  }
4745 
4746  m_BlockAllocation.m_Block = block;
4747  m_BlockAllocation.m_Offset = offset;
4748 }
4749 
4750 VkDeviceSize VmaAllocation_T::GetOffset() const
4751 {
4752  switch(m_Type)
4753  {
4754  case ALLOCATION_TYPE_BLOCK:
4755  return m_BlockAllocation.m_Offset;
4756  case ALLOCATION_TYPE_DEDICATED:
4757  return 0;
4758  default:
4759  VMA_ASSERT(0);
4760  return 0;
4761  }
4762 }
4763 
4764 VkDeviceMemory VmaAllocation_T::GetMemory() const
4765 {
4766  switch(m_Type)
4767  {
4768  case ALLOCATION_TYPE_BLOCK:
4769  return m_BlockAllocation.m_Block->m_hMemory;
4770  case ALLOCATION_TYPE_DEDICATED:
4771  return m_DedicatedAllocation.m_hMemory;
4772  default:
4773  VMA_ASSERT(0);
4774  return VK_NULL_HANDLE;
4775  }
4776 }
4777 
4778 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
4779 {
4780  switch(m_Type)
4781  {
4782  case ALLOCATION_TYPE_BLOCK:
4783  return m_BlockAllocation.m_Block->m_MemoryTypeIndex;
4784  case ALLOCATION_TYPE_DEDICATED:
4785  return m_DedicatedAllocation.m_MemoryTypeIndex;
4786  default:
4787  VMA_ASSERT(0);
4788  return UINT32_MAX;
4789  }
4790 }
4791 
4792 void* VmaAllocation_T::GetMappedData() const
4793 {
4794  switch(m_Type)
4795  {
4796  case ALLOCATION_TYPE_BLOCK:
4797  if(m_MapCount != 0)
4798  {
4799  void* pBlockData = m_BlockAllocation.m_Block->m_Mapping.GetMappedData();
4800  VMA_ASSERT(pBlockData != VMA_NULL);
4801  return (char*)pBlockData + m_BlockAllocation.m_Offset;
4802  }
4803  else
4804  {
4805  return VMA_NULL;
4806  }
4807  break;
4808  case ALLOCATION_TYPE_DEDICATED:
4809  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
4810  return m_DedicatedAllocation.m_pMappedData;
4811  default:
4812  VMA_ASSERT(0);
4813  return VMA_NULL;
4814  }
4815 }
4816 
4817 bool VmaAllocation_T::CanBecomeLost() const
4818 {
4819  switch(m_Type)
4820  {
4821  case ALLOCATION_TYPE_BLOCK:
4822  return m_BlockAllocation.m_CanBecomeLost;
4823  case ALLOCATION_TYPE_DEDICATED:
4824  return false;
4825  default:
4826  VMA_ASSERT(0);
4827  return false;
4828  }
4829 }
4830 
4831 VmaPool VmaAllocation_T::GetPool() const
4832 {
4833  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
4834  return m_BlockAllocation.m_hPool;
4835 }
4836 
4837 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
4838 {
4839  VMA_ASSERT(CanBecomeLost());
4840 
4841  /*
4842  Warning: This is a carefully designed algorithm.
4843  Do not modify unless you really know what you're doing :)
4844  */
4845  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
4846  for(;;)
4847  {
4848  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
4849  {
4850  VMA_ASSERT(0);
4851  return false;
4852  }
4853  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
4854  {
4855  return false;
4856  }
4857  else // Last use time earlier than current time.
4858  {
4859  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
4860  {
4861  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
4862  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
4863  return true;
4864  }
4865  }
4866  }
4867 }
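// Worked example for MakeLost (hypothetical numbers): with
// frameInUseCount == 2 and currentFrameIndex == 10, an allocation whose
// LastUseFrameIndex == 7 satisfies 7 + 2 < 10 and becomes lost, while one
// whose LastUseFrameIndex == 8 is still considered in use (8 + 2 >= 10), so
// MakeLost returns false for it.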
4868 
4869 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
4870 {
4871  VMA_ASSERT(IsUserDataString());
4872  if(m_pUserData != VMA_NULL)
4873  {
4874  char* const oldStr = (char*)m_pUserData;
4875  const size_t oldStrLen = strlen(oldStr);
4876  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
4877  m_pUserData = VMA_NULL;
4878  }
4879 }
4880 
4881 void VmaAllocation_T::BlockAllocMap()
4882 {
4883  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
4884 
4885  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
4886  {
4887  ++m_MapCount;
4888  }
4889  else
4890  {
4891  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
4892  }
4893 }
4894 
4895 void VmaAllocation_T::BlockAllocUnmap()
4896 {
4897  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
4898 
4899  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
4900  {
4901  --m_MapCount;
4902  }
4903  else
4904  {
4905  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
4906  }
4907 }
4908 
4909 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
4910 {
4911  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
4912 
4913  if(m_MapCount != 0)
4914  {
4915  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
4916  {
4917  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
4918  *ppData = m_DedicatedAllocation.m_pMappedData;
4919  ++m_MapCount;
4920  return VK_SUCCESS;
4921  }
4922  else
4923  {
4924  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
4925  return VK_ERROR_MEMORY_MAP_FAILED;
4926  }
4927  }
4928  else
4929  {
4930  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
4931  hAllocator->m_hDevice,
4932  m_DedicatedAllocation.m_hMemory,
4933  0, // offset
4934  VK_WHOLE_SIZE,
4935  0, // flags
4936  ppData);
4937  if(result == VK_SUCCESS)
4938  {
4939  m_DedicatedAllocation.m_pMappedData = *ppData;
4940  m_MapCount = 1;
4941  }
4942  return result;
4943  }
4944 }
4945 
4946 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
4947 {
4948  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
4949 
4950  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
4951  {
4952  --m_MapCount;
4953  if(m_MapCount == 0)
4954  {
4955  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
4956  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
4957  hAllocator->m_hDevice,
4958  m_DedicatedAllocation.m_hMemory);
4959  }
4960  }
4961  else
4962  {
4963  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
4964  }
4965 }
4966 
4967 #if VMA_STATS_STRING_ENABLED
4968 
4969 // Entries correspond to values of enum VmaSuballocationType.
4970 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
4971  "FREE",
4972  "UNKNOWN",
4973  "BUFFER",
4974  "IMAGE_UNKNOWN",
4975  "IMAGE_LINEAR",
4976  "IMAGE_OPTIMAL",
4977 };
4978 
4979 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
4980 {
4981  json.BeginObject();
4982 
4983  json.WriteString("Blocks");
4984  json.WriteNumber(stat.blockCount);
4985 
4986  json.WriteString("Allocations");
4987  json.WriteNumber(stat.allocationCount);
4988 
4989  json.WriteString("UnusedRanges");
4990  json.WriteNumber(stat.unusedRangeCount);
4991 
4992  json.WriteString("UsedBytes");
4993  json.WriteNumber(stat.usedBytes);
4994 
4995  json.WriteString("UnusedBytes");
4996  json.WriteNumber(stat.unusedBytes);
4997 
4998  if(stat.allocationCount > 1)
4999  {
5000  json.WriteString("AllocationSize");
5001  json.BeginObject(true);
5002  json.WriteString("Min");
5003  json.WriteNumber(stat.allocationSizeMin);
5004  json.WriteString("Avg");
5005  json.WriteNumber(stat.allocationSizeAvg);
5006  json.WriteString("Max");
5007  json.WriteNumber(stat.allocationSizeMax);
5008  json.EndObject();
5009  }
5010 
5011  if(stat.unusedRangeCount > 1)
5012  {
5013  json.WriteString("UnusedRangeSize");
5014  json.BeginObject(true);
5015  json.WriteString("Min");
5016  json.WriteNumber(stat.unusedRangeSizeMin);
5017  json.WriteString("Avg");
5018  json.WriteNumber(stat.unusedRangeSizeAvg);
5019  json.WriteString("Max");
5020  json.WriteNumber(stat.unusedRangeSizeMax);
5021  json.EndObject();
5022  }
5023 
5024  json.EndObject();
5025 }
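// Example output of VmaPrintStatInfo (illustrative, single-allocation case;
// "AllocationSize" and "UnusedRangeSize" are omitted because allocationCount
// and unusedRangeCount are not greater than 1):
//
// {
//  "Blocks": 1,
//  "Allocations": 1,
//  "UnusedRanges": 0,
//  "UsedBytes": 1048576,
//  "UnusedBytes": 0
// }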
5026 
5027 #endif // #if VMA_STATS_STRING_ENABLED
5028 
5029 struct VmaSuballocationItemSizeLess
5030 {
5031  bool operator()(
5032  const VmaSuballocationList::iterator lhs,
5033  const VmaSuballocationList::iterator rhs) const
5034  {
5035  return lhs->size < rhs->size;
5036  }
5037  bool operator()(
5038  const VmaSuballocationList::iterator lhs,
5039  VkDeviceSize rhsSize) const
5040  {
5041  return lhs->size < rhsSize;
5042  }
5043 };
5044 
5046 // class VmaBlockMetadata
5047 
5048 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
5049  m_Size(0),
5050  m_FreeCount(0),
5051  m_SumFreeSize(0),
5052  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
5053  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
5054 {
5055 }
5056 
5057 VmaBlockMetadata::~VmaBlockMetadata()
5058 {
5059 }
5060 
5061 void VmaBlockMetadata::Init(VkDeviceSize size)
5062 {
5063  m_Size = size;
5064  m_FreeCount = 1;
5065  m_SumFreeSize = size;
5066 
5067  VmaSuballocation suballoc = {};
5068  suballoc.offset = 0;
5069  suballoc.size = size;
5070  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5071  suballoc.hAllocation = VK_NULL_HANDLE;
5072 
5073  m_Suballocations.push_back(suballoc);
5074  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
5075  --suballocItem;
5076  m_FreeSuballocationsBySize.push_back(suballocItem);
5077 }
5078 
5079 bool VmaBlockMetadata::Validate() const
5080 {
5081  if(m_Suballocations.empty())
5082  {
5083  return false;
5084  }
5085 
5086  // Expected offset of new suballocation as calculated from previous ones.
5087  VkDeviceSize calculatedOffset = 0;
5088  // Expected number of free suballocations as calculated from traversing their list.
5089  uint32_t calculatedFreeCount = 0;
5090  // Expected sum size of free suballocations as calculated from traversing their list.
5091  VkDeviceSize calculatedSumFreeSize = 0;
5092  // Expected number of free suballocations that should be registered in
5093  // m_FreeSuballocationsBySize calculated from traversing their list.
5094  size_t freeSuballocationsToRegister = 0;
5095  // True if previously visited suballocation was free.
5096  bool prevFree = false;
5097 
5098  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
5099  suballocItem != m_Suballocations.cend();
5100  ++suballocItem)
5101  {
5102  const VmaSuballocation& subAlloc = *suballocItem;
5103 
5104  // Actual offset of this suballocation doesn't match expected one.
5105  if(subAlloc.offset != calculatedOffset)
5106  {
5107  return false;
5108  }
5109 
5110  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
5111  // Two adjacent free suballocations are invalid. They should be merged.
5112  if(prevFree && currFree)
5113  {
5114  return false;
5115  }
5116 
5117  if(currFree != (subAlloc.hAllocation == VK_NULL_HANDLE))
5118  {
5119  return false;
5120  }
5121 
5122  if(currFree)
5123  {
5124  calculatedSumFreeSize += subAlloc.size;
5125  ++calculatedFreeCount;
5126  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5127  {
5128  ++freeSuballocationsToRegister;
5129  }
5130  }
5131  else
5132  {
5133  if(subAlloc.hAllocation->GetOffset() != subAlloc.offset)
5134  {
5135  return false;
5136  }
5137  if(subAlloc.hAllocation->GetSize() != subAlloc.size)
5138  {
5139  return false;
5140  }
5141  }
5142 
5143  calculatedOffset += subAlloc.size;
5144  prevFree = currFree;
5145  }
5146 
5147  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
5148  // match expected one.
5149  if(m_FreeSuballocationsBySize.size() != freeSuballocationsToRegister)
5150  {
5151  return false;
5152  }
5153 
5154  VkDeviceSize lastSize = 0;
5155  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
5156  {
5157  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
5158 
5159  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
5160  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
5161  {
5162  return false;
5163  }
5164  // They must be sorted by size ascending.
5165  if(suballocItem->size < lastSize)
5166  {
5167  return false;
5168  }
5169 
5170  lastSize = suballocItem->size;
5171  }
5172 
5173  // Check if totals match calculated values.
5174  if(!ValidateFreeSuballocationList() ||
5175  (calculatedOffset != m_Size) ||
5176  (calculatedSumFreeSize != m_SumFreeSize) ||
5177  (calculatedFreeCount != m_FreeCount))
5178  {
5179  return false;
5180  }
5181 
5182  return true;
5183 }
5184 
5185 VkDeviceSize VmaBlockMetadata::GetUnusedRangeSizeMax() const
5186 {
5187  if(!m_FreeSuballocationsBySize.empty())
5188  {
5189  return m_FreeSuballocationsBySize.back()->size;
5190  }
5191  else
5192  {
5193  return 0;
5194  }
5195 }
5196 
5197 bool VmaBlockMetadata::IsEmpty() const
5198 {
5199  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
5200 }
5201 
5202 void VmaBlockMetadata::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
5203 {
5204  outInfo.blockCount = 1;
5205 
5206  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
5207  outInfo.allocationCount = rangeCount - m_FreeCount;
5208  outInfo.unusedRangeCount = m_FreeCount;
5209 
5210  outInfo.unusedBytes = m_SumFreeSize;
5211  outInfo.usedBytes = m_Size - outInfo.unusedBytes;
5212 
5213  outInfo.allocationSizeMin = UINT64_MAX;
5214  outInfo.allocationSizeMax = 0;
5215  outInfo.unusedRangeSizeMin = UINT64_MAX;
5216  outInfo.unusedRangeSizeMax = 0;
5217 
5218  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
5219  suballocItem != m_Suballocations.cend();
5220  ++suballocItem)
5221  {
5222  const VmaSuballocation& suballoc = *suballocItem;
5223  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
5224  {
5225  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
5226  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
5227  }
5228  else
5229  {
5230  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
5231  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
5232  }
5233  }
5234 }
5235 
5236 void VmaBlockMetadata::AddPoolStats(VmaPoolStats& inoutStats) const
5237 {
5238  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
5239 
5240  inoutStats.size += m_Size;
5241  inoutStats.unusedSize += m_SumFreeSize;
5242  inoutStats.allocationCount += rangeCount - m_FreeCount;
5243  inoutStats.unusedRangeCount += m_FreeCount;
5244  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
5245 }
5246 
5247 #if VMA_STATS_STRING_ENABLED
5248 
5249 void VmaBlockMetadata::PrintDetailedMap(class VmaJsonWriter& json) const
5250 {
5251  json.BeginObject();
5252 
5253  json.WriteString("TotalBytes");
5254  json.WriteNumber(m_Size);
5255 
5256  json.WriteString("UnusedBytes");
5257  json.WriteNumber(m_SumFreeSize);
5258 
5259  json.WriteString("Allocations");
5260  json.WriteNumber((uint64_t)m_Suballocations.size() - m_FreeCount);
5261 
5262  json.WriteString("UnusedRanges");
5263  json.WriteNumber(m_FreeCount);
5264 
5265  json.WriteString("Suballocations");
5266  json.BeginArray();
5267  size_t i = 0;
5268  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
5269  suballocItem != m_Suballocations.cend();
5270  ++suballocItem, ++i)
5271  {
5272  json.BeginObject(true);
5273 
5274  json.WriteString("Type");
5275  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[suballocItem->type]);
5276 
5277  json.WriteString("Size");
5278  json.WriteNumber(suballocItem->size);
5279 
5280  json.WriteString("Offset");
5281  json.WriteNumber(suballocItem->offset);
5282 
5283  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
5284  {
5285  const void* pUserData = suballocItem->hAllocation->GetUserData();
5286  if(pUserData != VMA_NULL)
5287  {
5288  json.WriteString("UserData");
5289  if(suballocItem->hAllocation->IsUserDataString())
5290  {
5291  json.WriteString((const char*)pUserData);
5292  }
5293  else
5294  {
5295  json.BeginString();
5296  json.ContinueString_Pointer(pUserData);
5297  json.EndString();
5298  }
5299  }
5300  }
5301 
5302  json.EndObject();
5303  }
5304  json.EndArray();
5305 
5306  json.EndObject();
5307 }
5308 
5309 #endif // #if VMA_STATS_STRING_ENABLED
5310 
5311 /*
5312 How many suitable free suballocations to analyze before choosing the best one.
5313 - Set to 1 to use First-Fit algorithm - the first suitable free suballocation
5314  will be chosen.
5315 - Set to UINT32_MAX to use Best-Fit/Worst-Fit algorithm - all suitable free
5316  suballocations will be analyzed and the best one will be chosen.
5317 - Any other value is also acceptable.
5318 */
5319 //static const uint32_t MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK = 8;
5320 
5321 void VmaBlockMetadata::CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest)
5322 {
5323  VMA_ASSERT(IsEmpty());
5324  pAllocationRequest->offset = 0;
5325  pAllocationRequest->sumFreeSize = m_SumFreeSize;
5326  pAllocationRequest->sumItemSize = 0;
5327  pAllocationRequest->item = m_Suballocations.begin();
5328  pAllocationRequest->itemsToMakeLostCount = 0;
5329 }
5330 
5331 bool VmaBlockMetadata::CreateAllocationRequest(
5332  uint32_t currentFrameIndex,
5333  uint32_t frameInUseCount,
5334  VkDeviceSize bufferImageGranularity,
5335  VkDeviceSize allocSize,
5336  VkDeviceSize allocAlignment,
5337  VmaSuballocationType allocType,
5338  bool canMakeOtherLost,
5339  VmaAllocationRequest* pAllocationRequest)
5340 {
5341  VMA_ASSERT(allocSize > 0);
5342  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
5343  VMA_ASSERT(pAllocationRequest != VMA_NULL);
5344  VMA_HEAVY_ASSERT(Validate());
5345 
5346  // There is not enough total free space in this block to fulfill the request: early return.
5347  if(canMakeOtherLost == false && m_SumFreeSize < allocSize)
5348  {
5349  return false;
5350  }
5351 
5352  // New algorithm, efficiently searching m_FreeSuballocationsBySize.
5353  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
5354  if(freeSuballocCount > 0)
5355  {
5356  if(VMA_BEST_FIT)
5357  {
5358  // Find first free suballocation with size not less than allocSize.
5359  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
5360  m_FreeSuballocationsBySize.data(),
5361  m_FreeSuballocationsBySize.data() + freeSuballocCount,
5362  allocSize,
5363  VmaSuballocationItemSizeLess());
5364  size_t index = it - m_FreeSuballocationsBySize.data();
5365  for(; index < freeSuballocCount; ++index)
5366  {
5367  if(CheckAllocation(
5368  currentFrameIndex,
5369  frameInUseCount,
5370  bufferImageGranularity,
5371  allocSize,
5372  allocAlignment,
5373  allocType,
5374  m_FreeSuballocationsBySize[index],
5375  false, // canMakeOtherLost
5376  &pAllocationRequest->offset,
5377  &pAllocationRequest->itemsToMakeLostCount,
5378  &pAllocationRequest->sumFreeSize,
5379  &pAllocationRequest->sumItemSize))
5380  {
5381  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
5382  return true;
5383  }
5384  }
5385  }
5386  else
5387  {
5388  // Search starting from the biggest suballocations.
5389  for(size_t index = freeSuballocCount; index--; )
5390  {
5391  if(CheckAllocation(
5392  currentFrameIndex,
5393  frameInUseCount,
5394  bufferImageGranularity,
5395  allocSize,
5396  allocAlignment,
5397  allocType,
5398  m_FreeSuballocationsBySize[index],
5399  false, // canMakeOtherLost
5400  &pAllocationRequest->offset,
5401  &pAllocationRequest->itemsToMakeLostCount,
5402  &pAllocationRequest->sumFreeSize,
5403  &pAllocationRequest->sumItemSize))
5404  {
5405  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
5406  return true;
5407  }
5408  }
5409  }
5410  }
5411 
5412  if(canMakeOtherLost)
5413  {
5414  // Brute-force algorithm. TODO: Come up with something better.
5415 
5416  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
5417  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
5418 
5419  VmaAllocationRequest tmpAllocRequest = {};
5420  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
5421  suballocIt != m_Suballocations.end();
5422  ++suballocIt)
5423  {
5424  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
5425  suballocIt->hAllocation->CanBecomeLost())
5426  {
5427  if(CheckAllocation(
5428  currentFrameIndex,
5429  frameInUseCount,
5430  bufferImageGranularity,
5431  allocSize,
5432  allocAlignment,
5433  allocType,
5434  suballocIt,
5435  canMakeOtherLost,
5436  &tmpAllocRequest.offset,
5437  &tmpAllocRequest.itemsToMakeLostCount,
5438  &tmpAllocRequest.sumFreeSize,
5439  &tmpAllocRequest.sumItemSize))
5440  {
5441  tmpAllocRequest.item = suballocIt;
5442 
5443  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
5444  {
5445  *pAllocationRequest = tmpAllocRequest;
5446  }
5447  }
5448  }
5449  }
5450 
5451  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
5452  {
5453  return true;
5454  }
5455  }
5456 
5457  return false;
5458 }
5459 
5460 bool VmaBlockMetadata::MakeRequestedAllocationsLost(
5461  uint32_t currentFrameIndex,
5462  uint32_t frameInUseCount,
5463  VmaAllocationRequest* pAllocationRequest)
5464 {
5465  while(pAllocationRequest->itemsToMakeLostCount > 0)
5466  {
5467  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
5468  {
5469  ++pAllocationRequest->item;
5470  }
5471  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
5472  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
5473  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
5474  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
5475  {
5476  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
5477  --pAllocationRequest->itemsToMakeLostCount;
5478  }
5479  else
5480  {
5481  return false;
5482  }
5483  }
5484 
5485  VMA_HEAVY_ASSERT(Validate());
5486  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
5487  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
5488 
5489  return true;
5490 }
5491 
5492 uint32_t VmaBlockMetadata::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
5493 {
5494  uint32_t lostAllocationCount = 0;
5495  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
5496  it != m_Suballocations.end();
5497  ++it)
5498  {
5499  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
5500  it->hAllocation->CanBecomeLost() &&
5501  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
5502  {
5503  it = FreeSuballocation(it);
5504  ++lostAllocationCount;
5505  }
5506  }
5507  return lostAllocationCount;
5508 }
5509 
5510 void VmaBlockMetadata::Alloc(
5511  const VmaAllocationRequest& request,
5512  VmaSuballocationType type,
5513  VkDeviceSize allocSize,
5514  VmaAllocation hAllocation)
5515 {
5516  VMA_ASSERT(request.item != m_Suballocations.end());
5517  VmaSuballocation& suballoc = *request.item;
5518  // Given suballocation is a free block.
5519  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
5520  // Given offset is inside this suballocation.
5521  VMA_ASSERT(request.offset >= suballoc.offset);
5522  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
5523  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
5524  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
5525 
5526  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
5527  // it to become used.
5528  UnregisterFreeSuballocation(request.item);
5529 
5530  suballoc.offset = request.offset;
5531  suballoc.size = allocSize;
5532  suballoc.type = type;
5533  suballoc.hAllocation = hAllocation;
5534 
5535  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
5536  if(paddingEnd)
5537  {
5538  VmaSuballocation paddingSuballoc = {};
5539  paddingSuballoc.offset = request.offset + allocSize;
5540  paddingSuballoc.size = paddingEnd;
5541  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5542  VmaSuballocationList::iterator next = request.item;
5543  ++next;
5544  const VmaSuballocationList::iterator paddingEndItem =
5545  m_Suballocations.insert(next, paddingSuballoc);
5546  RegisterFreeSuballocation(paddingEndItem);
5547  }
5548 
5549  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
5550  if(paddingBegin)
5551  {
5552  VmaSuballocation paddingSuballoc = {};
5553  paddingSuballoc.offset = request.offset - paddingBegin;
5554  paddingSuballoc.size = paddingBegin;
5555  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5556  const VmaSuballocationList::iterator paddingBeginItem =
5557  m_Suballocations.insert(request.item, paddingSuballoc);
5558  RegisterFreeSuballocation(paddingBeginItem);
5559  }
5560 
5561  // Update totals.
5562  m_FreeCount = m_FreeCount - 1;
5563  if(paddingBegin > 0)
5564  {
5565  ++m_FreeCount;
5566  }
5567  if(paddingEnd > 0)
5568  {
5569  ++m_FreeCount;
5570  }
5571  m_SumFreeSize -= allocSize;
5572 }
5573 
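// Worked example of the padding logic above (illustrative numbers): a free
// suballocation [offset 0, size 256] consumed with request.offset == 64 and
// allocSize == 128 is split into:
//   [0, 64)    - new free "paddingBegin" suballocation inserted before the item,
//   [64, 192)  - the item itself, now used,
//   [192, 256) - new free "paddingEnd" suballocation inserted after the item.
// Net effect on totals: m_FreeCount changes by -1 (item used) +2 (two paddings),
// and m_SumFreeSize decreases by exactly allocSize == 128.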
5574 void VmaBlockMetadata::Free(const VmaAllocation allocation)
5575 {
5576  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
5577  suballocItem != m_Suballocations.end();
5578  ++suballocItem)
5579  {
5580  VmaSuballocation& suballoc = *suballocItem;
5581  if(suballoc.hAllocation == allocation)
5582  {
5583  FreeSuballocation(suballocItem);
5584  VMA_HEAVY_ASSERT(Validate());
5585  return;
5586  }
5587  }
5588  VMA_ASSERT(0 && "Not found!");
5589 }
5590 
5591 void VmaBlockMetadata::FreeAtOffset(VkDeviceSize offset)
5592 {
5593  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
5594  suballocItem != m_Suballocations.end();
5595  ++suballocItem)
5596  {
5597  VmaSuballocation& suballoc = *suballocItem;
5598  if(suballoc.offset == offset)
5599  {
5600  FreeSuballocation(suballocItem);
5601  return;
5602  }
5603  }
5604  VMA_ASSERT(0 && "Not found!");
5605 }
5606 
5607 bool VmaBlockMetadata::ValidateFreeSuballocationList() const
5608 {
5609  VkDeviceSize lastSize = 0;
5610  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
5611  {
5612  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
5613 
5614  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
5615  {
5616  VMA_ASSERT(0);
5617  return false;
5618  }
5619  if(it->size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5620  {
5621  VMA_ASSERT(0);
5622  return false;
5623  }
5624  if(it->size < lastSize)
5625  {
5626  VMA_ASSERT(0);
5627  return false;
5628  }
5629 
5630  lastSize = it->size;
5631  }
5632  return true;
5633 }
5634 
5635 bool VmaBlockMetadata::CheckAllocation(
5636  uint32_t currentFrameIndex,
5637  uint32_t frameInUseCount,
5638  VkDeviceSize bufferImageGranularity,
5639  VkDeviceSize allocSize,
5640  VkDeviceSize allocAlignment,
5641  VmaSuballocationType allocType,
5642  VmaSuballocationList::const_iterator suballocItem,
5643  bool canMakeOtherLost,
5644  VkDeviceSize* pOffset,
5645  size_t* itemsToMakeLostCount,
5646  VkDeviceSize* pSumFreeSize,
5647  VkDeviceSize* pSumItemSize) const
5648 {
5649  VMA_ASSERT(allocSize > 0);
5650  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
5651  VMA_ASSERT(suballocItem != m_Suballocations.cend());
5652  VMA_ASSERT(pOffset != VMA_NULL);
5653 
5654  *itemsToMakeLostCount = 0;
5655  *pSumFreeSize = 0;
5656  *pSumItemSize = 0;
5657 
5658  if(canMakeOtherLost)
5659  {
5660  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
5661  {
5662  *pSumFreeSize = suballocItem->size;
5663  }
5664  else
5665  {
5666  if(suballocItem->hAllocation->CanBecomeLost() &&
5667  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
5668  {
5669  ++*itemsToMakeLostCount;
5670  *pSumItemSize = suballocItem->size;
5671  }
5672  else
5673  {
5674  return false;
5675  }
5676  }
5677 
5678  // Remaining size is too small for this request: Early return.
5679  if(m_Size - suballocItem->offset < allocSize)
5680  {
5681  return false;
5682  }
5683 
5684  // Start from offset equal to beginning of this suballocation.
5685  *pOffset = suballocItem->offset;
5686 
5687  // Apply VMA_DEBUG_MARGIN at the beginning.
5688  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
5689  {
5690  *pOffset += VMA_DEBUG_MARGIN;
5691  }
5692 
5693  // Apply alignment.
5694  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
5695  *pOffset = VmaAlignUp(*pOffset, alignment);
5696 
5697  // Check previous suballocations for BufferImageGranularity conflicts.
5698  // Make bigger alignment if necessary.
5699  if(bufferImageGranularity > 1)
5700  {
5701  bool bufferImageGranularityConflict = false;
5702  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
5703  while(prevSuballocItem != m_Suballocations.cbegin())
5704  {
5705  --prevSuballocItem;
5706  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
5707  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
5708  {
5709  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
5710  {
5711  bufferImageGranularityConflict = true;
5712  break;
5713  }
5714  }
5715  else
5716  // Already on previous page.
5717  break;
5718  }
5719  if(bufferImageGranularityConflict)
5720  {
5721  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
5722  }
5723  }
5724 
5725  // Now that we have final *pOffset, check if we are past suballocItem.
5726  // If yes, return false - this function should be called for another suballocItem as starting point.
5727  if(*pOffset >= suballocItem->offset + suballocItem->size)
5728  {
5729  return false;
5730  }
5731 
5732  // Calculate padding at the beginning based on current offset.
5733  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
5734 
5735  // Calculate required margin at the end, if this is not the last suballocation.
5736  VmaSuballocationList::const_iterator next = suballocItem;
5737  ++next;
5738  const VkDeviceSize requiredEndMargin =
5739  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
5740 
5741  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
5742  // Another early return check.
5743  if(suballocItem->offset + totalSize > m_Size)
5744  {
5745  return false;
5746  }
5747 
5748  // Advance lastSuballocItem until desired size is reached.
5749  // Update itemsToMakeLostCount.
5750  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
5751  if(totalSize > suballocItem->size)
5752  {
5753  VkDeviceSize remainingSize = totalSize - suballocItem->size;
5754  while(remainingSize > 0)
5755  {
5756  ++lastSuballocItem;
5757  if(lastSuballocItem == m_Suballocations.cend())
5758  {
5759  return false;
5760  }
5761  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
5762  {
5763  *pSumFreeSize += lastSuballocItem->size;
5764  }
5765  else
5766  {
5767  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
5768  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
5769  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
5770  {
5771  ++*itemsToMakeLostCount;
5772  *pSumItemSize += lastSuballocItem->size;
5773  }
5774  else
5775  {
5776  return false;
5777  }
5778  }
5779  remainingSize = (lastSuballocItem->size < remainingSize) ?
5780  remainingSize - lastSuballocItem->size : 0;
5781  }
5782  }
5783 
5784  // Check next suballocations for BufferImageGranularity conflicts.
5785  // If conflict exists, we must mark more allocations lost or fail.
5786  if(bufferImageGranularity > 1)
5787  {
5788  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
5789  ++nextSuballocItem;
5790  while(nextSuballocItem != m_Suballocations.cend())
5791  {
5792  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
5793  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
5794  {
5795  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
5796  {
5797  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
5798  if(nextSuballoc.hAllocation->CanBecomeLost() &&
5799  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
5800  {
5801  ++*itemsToMakeLostCount;
5802  }
5803  else
5804  {
5805  return false;
5806  }
5807  }
5808  }
5809  else
5810  {
5811  // Already on next page.
5812  break;
5813  }
5814  ++nextSuballocItem;
5815  }
5816  }
5817  }
5818  else
5819  {
5820  const VmaSuballocation& suballoc = *suballocItem;
5821  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
5822 
5823  *pSumFreeSize = suballoc.size;
5824 
5825  // Size of this suballocation is too small for this request: Early return.
5826  if(suballoc.size < allocSize)
5827  {
5828  return false;
5829  }
5830 
5831  // Start from offset equal to beginning of this suballocation.
5832  *pOffset = suballoc.offset;
5833 
5834  // Apply VMA_DEBUG_MARGIN at the beginning.
5835  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
5836  {
5837  *pOffset += VMA_DEBUG_MARGIN;
5838  }
5839 
5840  // Apply alignment.
5841  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
5842  *pOffset = VmaAlignUp(*pOffset, alignment);
5843 
5844  // Check previous suballocations for BufferImageGranularity conflicts.
5845  // Make bigger alignment if necessary.
5846  if(bufferImageGranularity > 1)
5847  {
5848  bool bufferImageGranularityConflict = false;
5849  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
5850  while(prevSuballocItem != m_Suballocations.cbegin())
5851  {
5852  --prevSuballocItem;
5853  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
5854  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
5855  {
5856  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
5857  {
5858  bufferImageGranularityConflict = true;
5859  break;
5860  }
5861  }
5862  else
5863  // Already on previous page.
5864  break;
5865  }
5866  if(bufferImageGranularityConflict)
5867  {
5868  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
5869  }
5870  }
5871 
5872  // Calculate padding at the beginning based on current offset.
5873  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
5874 
5875  // Calculate required margin at the end, if this is not the last suballocation.
5876  VmaSuballocationList::const_iterator next = suballocItem;
5877  ++next;
5878  const VkDeviceSize requiredEndMargin =
5879  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
5880 
5881  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
5882  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
5883  {
5884  return false;
5885  }
5886 
5887  // Check next suballocations for BufferImageGranularity conflicts.
5888  // If conflict exists, allocation cannot be made here.
5889  if(bufferImageGranularity > 1)
5890  {
5891  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
5892  ++nextSuballocItem;
5893  while(nextSuballocItem != m_Suballocations.cend())
5894  {
5895  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
5896  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
5897  {
5898  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
5899  {
5900  return false;
5901  }
5902  }
5903  else
5904  {
5905  // Already on next page.
5906  break;
5907  }
5908  ++nextSuballocItem;
5909  }
5910  }
5911  }
5912 
5913  // All tests passed: Success. pOffset is already filled.
5914  return true;
5915 }
5916 
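// Worked example of the alignment math above (illustrative numbers, free-list
// path, VMA_DEBUG_MARGIN == 0): for a free suballocation at offset 100 with
// size 200 and allocAlignment == 64:
//   *pOffset     = VmaAlignUp(100, 64) == 128
//   paddingBegin = 128 - 100 == 28
// so the request fits iff 28 + allocSize + requiredEndMargin <= 200.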
5917 void VmaBlockMetadata::MergeFreeWithNext(VmaSuballocationList::iterator item)
5918 {
5919  VMA_ASSERT(item != m_Suballocations.end());
5920  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
5921 
5922  VmaSuballocationList::iterator nextItem = item;
5923  ++nextItem;
5924  VMA_ASSERT(nextItem != m_Suballocations.end());
5925  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
5926 
5927  item->size += nextItem->size;
5928  --m_FreeCount;
5929  m_Suballocations.erase(nextItem);
5930 }
5931 
5932 VmaSuballocationList::iterator VmaBlockMetadata::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
5933 {
5934  // Change this suballocation to be marked as free.
5935  VmaSuballocation& suballoc = *suballocItem;
5936  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5937  suballoc.hAllocation = VK_NULL_HANDLE;
5938 
5939  // Update totals.
5940  ++m_FreeCount;
5941  m_SumFreeSize += suballoc.size;
5942 
5943  // Merge with previous and/or next suballocation if it's also free.
5944  bool mergeWithNext = false;
5945  bool mergeWithPrev = false;
5946 
5947  VmaSuballocationList::iterator nextItem = suballocItem;
5948  ++nextItem;
5949  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
5950  {
5951  mergeWithNext = true;
5952  }
5953 
5954  VmaSuballocationList::iterator prevItem = suballocItem;
5955  if(suballocItem != m_Suballocations.begin())
5956  {
5957  --prevItem;
5958  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
5959  {
5960  mergeWithPrev = true;
5961  }
5962  }
5963 
5964  if(mergeWithNext)
5965  {
5966  UnregisterFreeSuballocation(nextItem);
5967  MergeFreeWithNext(suballocItem);
5968  }
5969 
5970  if(mergeWithPrev)
5971  {
5972  UnregisterFreeSuballocation(prevItem);
5973  MergeFreeWithNext(prevItem);
5974  RegisterFreeSuballocation(prevItem);
5975  return prevItem;
5976  }
5977  else
5978  {
5979  RegisterFreeSuballocation(suballocItem);
5980  return suballocItem;
5981  }
5982 }
5983 
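// Example of the merge cases above (F = free, U = used): freeing the middle
// suballocation in [F][U][F] first increments m_FreeCount (the item becomes
// free), then each MergeFreeWithNext() call decrements it, leaving a single
// coalesced [F] and a net m_FreeCount change of -1.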
5984 void VmaBlockMetadata::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
5985 {
5986  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
5987  VMA_ASSERT(item->size > 0);
5988 
5989  // You may want to enable this validation at the beginning or at the end of
5990  // this function, depending on what you want to check.
5991  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5992 
5993  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5994  {
5995  if(m_FreeSuballocationsBySize.empty())
5996  {
5997  m_FreeSuballocationsBySize.push_back(item);
5998  }
5999  else
6000  {
6001  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
6002  }
6003  }
6004 
6005  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
6006 }
6007 
6008 
6009 void VmaBlockMetadata::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
6010 {
6011  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
6012  VMA_ASSERT(item->size > 0);
6013 
6014  // You may want to enable this validation at the beginning or at the end of
6015  // this function, depending on what you want to check.
6016  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
6017 
6018  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6019  {
6020  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
6021  m_FreeSuballocationsBySize.data(),
6022  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
6023  item,
6024  VmaSuballocationItemSizeLess());
6025  for(size_t index = it - m_FreeSuballocationsBySize.data();
6026  index < m_FreeSuballocationsBySize.size();
6027  ++index)
6028  {
6029  if(m_FreeSuballocationsBySize[index] == item)
6030  {
6031  VmaVectorRemove(m_FreeSuballocationsBySize, index);
6032  return;
6033  }
6034  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
6035  }
6036  VMA_ASSERT(0 && "Not found.");
6037  }
6038 
6039  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
6040 }
6041 
6042 ////////////////////////////////////////////////////////////////////////////////
6043 // class VmaDeviceMemoryMapping
6044 
6045 VmaDeviceMemoryMapping::VmaDeviceMemoryMapping() :
6046  m_MapCount(0),
6047  m_pMappedData(VMA_NULL)
6048 {
6049 }
6050 
6051 VmaDeviceMemoryMapping::~VmaDeviceMemoryMapping()
6052 {
6053  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
6054 }
6055 
6056 VkResult VmaDeviceMemoryMapping::Map(VmaAllocator hAllocator, VkDeviceMemory hMemory, uint32_t count, void **ppData)
6057 {
6058  if(count == 0)
6059  {
6060  return VK_SUCCESS;
6061  }
6062 
6063  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
6064  if(m_MapCount != 0)
6065  {
6066  m_MapCount += count;
6067  VMA_ASSERT(m_pMappedData != VMA_NULL);
6068  if(ppData != VMA_NULL)
6069  {
6070  *ppData = m_pMappedData;
6071  }
6072  return VK_SUCCESS;
6073  }
6074  else
6075  {
6076  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
6077  hAllocator->m_hDevice,
6078  hMemory,
6079  0, // offset
6080  VK_WHOLE_SIZE,
6081  0, // flags
6082  &m_pMappedData);
6083  if(result == VK_SUCCESS)
6084  {
6085  if(ppData != VMA_NULL)
6086  {
6087  *ppData = m_pMappedData;
6088  }
6089  m_MapCount = count;
6090  }
6091  return result;
6092  }
6093 }
6094 
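// A minimal usage sketch of the reference-counted mapping above (hypothetical
// handles); every successful Map() must be balanced by an Unmap() - defined
// just below - with the same count:
//
//   void* pData = VMA_NULL;
//   if(mapping.Map(hAllocator, hMemory, 1, &pData) == VK_SUCCESS)
//   {
//       // ... read or write through pData ...
//       mapping.Unmap(hAllocator, hMemory, 1);
//   }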
6095 void VmaDeviceMemoryMapping::Unmap(VmaAllocator hAllocator, VkDeviceMemory hMemory, uint32_t count)
6096 {
6097  if(count == 0)
6098  {
6099  return;
6100  }
6101 
6102  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
6103  if(m_MapCount >= count)
6104  {
6105  m_MapCount -= count;
6106  if(m_MapCount == 0)
6107  {
6108  m_pMappedData = VMA_NULL;
6109  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, hMemory);
6110  }
6111  }
6112  else
6113  {
6114  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
6115  }
6116 }
6117 
6118 ////////////////////////////////////////////////////////////////////////////////
6119 // class VmaDeviceMemoryBlock
6120 
6121 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
6122  m_MemoryTypeIndex(UINT32_MAX),
6123  m_hMemory(VK_NULL_HANDLE),
6124  m_Metadata(hAllocator)
6125 {
6126 }
6127 
6128 void VmaDeviceMemoryBlock::Init(
6129  uint32_t newMemoryTypeIndex,
6130  VkDeviceMemory newMemory,
6131  VkDeviceSize newSize)
6132 {
6133  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
6134 
6135  m_MemoryTypeIndex = newMemoryTypeIndex;
6136  m_hMemory = newMemory;
6137 
6138  m_Metadata.Init(newSize);
6139 }
6140 
6141 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
6142 {
6143  // This is the most important assert in the entire library.
6144  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
6145  VMA_ASSERT(m_Metadata.IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
6146 
6147  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
6148  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_Metadata.GetSize(), m_hMemory);
6149  m_hMemory = VK_NULL_HANDLE;
6150 }
6151 
6152 bool VmaDeviceMemoryBlock::Validate() const
6153 {
6154  if((m_hMemory == VK_NULL_HANDLE) ||
6155  (m_Metadata.GetSize() == 0))
6156  {
6157  return false;
6158  }
6159 
6160  return m_Metadata.Validate();
6161 }
6162 
6163 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
6164 {
6165  return m_Mapping.Map(hAllocator, m_hMemory, count, ppData);
6166 }
6167 
6168 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
6169 {
6170  m_Mapping.Unmap(hAllocator, m_hMemory, count);
6171 }
6172 
6173 static void InitStatInfo(VmaStatInfo& outInfo)
6174 {
6175  memset(&outInfo, 0, sizeof(outInfo));
6176  outInfo.allocationSizeMin = UINT64_MAX;
6177  outInfo.unusedRangeSizeMin = UINT64_MAX;
6178 }
6179 
6180 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
6181 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
6182 {
6183  inoutInfo.blockCount += srcInfo.blockCount;
6184  inoutInfo.allocationCount += srcInfo.allocationCount;
6185  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
6186  inoutInfo.usedBytes += srcInfo.usedBytes;
6187  inoutInfo.unusedBytes += srcInfo.unusedBytes;
6188  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
6189  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
6190  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
6191  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
6192 }
6193 
6194 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
6195 {
6196  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
6197  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
6198  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
6199  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
6200 }
6201 
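// E.g. usedBytes == 1000 and allocationCount == 3 gives
// allocationSizeAvg == VmaRoundDiv<VkDeviceSize>(1000, 3) == 333 (rounding
// division); when a count is 0, the corresponding average stays 0.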
6202 VmaPool_T::VmaPool_T(
6203  VmaAllocator hAllocator,
6204  const VmaPoolCreateInfo& createInfo) :
6205  m_BlockVector(
6206  hAllocator,
6207  createInfo.memoryTypeIndex,
6208  createInfo.blockSize,
6209  createInfo.minBlockCount,
6210  createInfo.maxBlockCount,
6211  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
6212  createInfo.frameInUseCount,
6213  true) // isCustomPool
6214 {
6215 }
6216 
6217 VmaPool_T::~VmaPool_T()
6218 {
6219 }
6220 
6221 #if VMA_STATS_STRING_ENABLED
6222 
6223 #endif // #if VMA_STATS_STRING_ENABLED
6224 
6225 VmaBlockVector::VmaBlockVector(
6226  VmaAllocator hAllocator,
6227  uint32_t memoryTypeIndex,
6228  VkDeviceSize preferredBlockSize,
6229  size_t minBlockCount,
6230  size_t maxBlockCount,
6231  VkDeviceSize bufferImageGranularity,
6232  uint32_t frameInUseCount,
6233  bool isCustomPool) :
6234  m_hAllocator(hAllocator),
6235  m_MemoryTypeIndex(memoryTypeIndex),
6236  m_PreferredBlockSize(preferredBlockSize),
6237  m_MinBlockCount(minBlockCount),
6238  m_MaxBlockCount(maxBlockCount),
6239  m_BufferImageGranularity(bufferImageGranularity),
6240  m_FrameInUseCount(frameInUseCount),
6241  m_IsCustomPool(isCustomPool),
6242  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
6243  m_HasEmptyBlock(false),
6244  m_pDefragmentator(VMA_NULL)
6245 {
6246 }
6247 
6248 VmaBlockVector::~VmaBlockVector()
6249 {
6250  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
6251 
6252  for(size_t i = m_Blocks.size(); i--; )
6253  {
6254  m_Blocks[i]->Destroy(m_hAllocator);
6255  vma_delete(m_hAllocator, m_Blocks[i]);
6256  }
6257 }
6258 
6259 VkResult VmaBlockVector::CreateMinBlocks()
6260 {
6261  for(size_t i = 0; i < m_MinBlockCount; ++i)
6262  {
6263  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
6264  if(res != VK_SUCCESS)
6265  {
6266  return res;
6267  }
6268  }
6269  return VK_SUCCESS;
6270 }
6271 
6272 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
6273 {
6274  pStats->size = 0;
6275  pStats->unusedSize = 0;
6276  pStats->allocationCount = 0;
6277  pStats->unusedRangeCount = 0;
6278  pStats->unusedRangeSizeMax = 0;
6279 
6280  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6281 
6282  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6283  {
6284  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
6285  VMA_ASSERT(pBlock);
6286  VMA_HEAVY_ASSERT(pBlock->Validate());
6287  pBlock->m_Metadata.AddPoolStats(*pStats);
6288  }
6289 }
6290 
6291 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
6292 
6293 VkResult VmaBlockVector::Allocate(
6294  VmaPool hCurrentPool,
6295  uint32_t currentFrameIndex,
6296  const VkMemoryRequirements& vkMemReq,
6297  const VmaAllocationCreateInfo& createInfo,
6298  VmaSuballocationType suballocType,
6299  VmaAllocation* pAllocation)
6300 {
6301  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
6302  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
6303 
6304  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6305 
6306  // 1. Search existing allocations. Try to allocate without making other allocations lost.
6307  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
6308  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
6309  {
6310  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
6311  VMA_ASSERT(pCurrBlock);
6312  VmaAllocationRequest currRequest = {};
6313  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
6314  currentFrameIndex,
6315  m_FrameInUseCount,
6316  m_BufferImageGranularity,
6317  vkMemReq.size,
6318  vkMemReq.alignment,
6319  suballocType,
6320  false, // canMakeOtherLost
6321  &currRequest))
6322  {
6323  // Allocate from pCurrBlock.
6324  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
6325 
6326  if(mapped)
6327  {
6328  VkResult res = pCurrBlock->Map(m_hAllocator, 1, VMA_NULL);
6329  if(res != VK_SUCCESS)
6330  {
6331  return res;
6332  }
6333  }
6334 
6335  // We no longer have an empty block.
6336  if(pCurrBlock->m_Metadata.IsEmpty())
6337  {
6338  m_HasEmptyBlock = false;
6339  }
6340 
6341  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
6342  pCurrBlock->m_Metadata.Alloc(currRequest, suballocType, vkMemReq.size, *pAllocation);
6343  (*pAllocation)->InitBlockAllocation(
6344  hCurrentPool,
6345  pCurrBlock,
6346  currRequest.offset,
6347  vkMemReq.alignment,
6348  vkMemReq.size,
6349  suballocType,
6350  mapped,
6351  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
6352  VMA_HEAVY_ASSERT(pCurrBlock->Validate());
6353  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
6354  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
6355  return VK_SUCCESS;
6356  }
6357  }
6358 
6359  const bool canCreateNewBlock =
6360  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
6361  (m_Blocks.size() < m_MaxBlockCount);
6362 
6363  // 2. Try to create new block.
6364  if(canCreateNewBlock)
6365  {
6366  // Calculate optimal size for new block.
6367  VkDeviceSize newBlockSize = m_PreferredBlockSize;
6368  uint32_t newBlockSizeShift = 0;
6369  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
6370 
6371  // Allocating blocks of other sizes is allowed only in default pools.
6372  // In custom pools block size is fixed.
6373  if(m_IsCustomPool == false)
6374  {
6375  // Allocate 1/8, 1/4, 1/2 as first blocks.
6376  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
6377  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
6378  {
6379  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
6380  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= vkMemReq.size * 2)
6381  {
6382  newBlockSize = smallerNewBlockSize;
6383  ++newBlockSizeShift;
6384  }
6385  else
6386  {
6387  break;
6388  }
6389  }
6390  }
6391 
6392  size_t newBlockIndex = 0;
6393  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
6394  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
6395  if(m_IsCustomPool == false)
6396  {
6397  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
6398  {
6399  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
6400  if(smallerNewBlockSize >= vkMemReq.size)
6401  {
6402  newBlockSize = smallerNewBlockSize;
6403  ++newBlockSizeShift;
6404  res = CreateBlock(newBlockSize, &newBlockIndex);
6405  }
6406  else
6407  {
6408  break;
6409  }
6410  }
6411  }
6412 
6413  if(res == VK_SUCCESS)
6414  {
6415  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
6416  VMA_ASSERT(pBlock->m_Metadata.GetSize() >= vkMemReq.size);
6417 
6418  if(mapped)
6419  {
6420  res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
6421  if(res != VK_SUCCESS)
6422  {
6423  return res;
6424  }
6425  }
6426 
6427  // Allocate from pBlock. Because it is empty, allocRequest can be trivially filled.
6428  VmaAllocationRequest allocRequest;
6429  pBlock->m_Metadata.CreateFirstAllocationRequest(&allocRequest);
6430  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
6431  pBlock->m_Metadata.Alloc(allocRequest, suballocType, vkMemReq.size, *pAllocation);
6432  (*pAllocation)->InitBlockAllocation(
6433  hCurrentPool,
6434  pBlock,
6435  allocRequest.offset,
6436  vkMemReq.alignment,
6437  vkMemReq.size,
6438  suballocType,
6439  mapped,
6440  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
6441  VMA_HEAVY_ASSERT(pBlock->Validate());
6442  VMA_DEBUG_LOG("    Created new allocation Size=%llu", newBlockSize);
6443  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
6444  return VK_SUCCESS;
6445  }
6446  }
6447 
6448  const bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
6449 
6450  // 3. Try to allocate from existing blocks with making other allocations lost.
6451  if(canMakeOtherLost)
6452  {
6453  uint32_t tryIndex = 0;
6454  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
6455  {
6456  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
6457  VmaAllocationRequest bestRequest = {};
6458  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
6459 
6460  // 1. Search existing allocations.
6461  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
6462  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
6463  {
6464  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
6465  VMA_ASSERT(pCurrBlock);
6466  VmaAllocationRequest currRequest = {};
6467  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
6468  currentFrameIndex,
6469  m_FrameInUseCount,
6470  m_BufferImageGranularity,
6471  vkMemReq.size,
6472  vkMemReq.alignment,
6473  suballocType,
6474  canMakeOtherLost,
6475  &currRequest))
6476  {
6477  const VkDeviceSize currRequestCost = currRequest.CalcCost();
6478  if(pBestRequestBlock == VMA_NULL ||
6479  currRequestCost < bestRequestCost)
6480  {
6481  pBestRequestBlock = pCurrBlock;
6482  bestRequest = currRequest;
6483  bestRequestCost = currRequestCost;
6484 
6485  if(bestRequestCost == 0)
6486  {
6487  break;
6488  }
6489  }
6490  }
6491  }
6492 
6493  if(pBestRequestBlock != VMA_NULL)
6494  {
6495  if(mapped)
6496  {
6497  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
6498  if(res != VK_SUCCESS)
6499  {
6500  return res;
6501  }
6502  }
6503 
6504  if(pBestRequestBlock->m_Metadata.MakeRequestedAllocationsLost(
6505  currentFrameIndex,
6506  m_FrameInUseCount,
6507  &bestRequest))
6508  {
6509  // We no longer have an empty block.
6510  if(pBestRequestBlock->m_Metadata.IsEmpty())
6511  {
6512  m_HasEmptyBlock = false;
6513  }
6514  // Allocate from this pBlock.
6515  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
6516  pBestRequestBlock->m_Metadata.Alloc(bestRequest, suballocType, vkMemReq.size, *pAllocation);
6517  (*pAllocation)->InitBlockAllocation(
6518  hCurrentPool,
6519  pBestRequestBlock,
6520  bestRequest.offset,
6521  vkMemReq.alignment,
6522  vkMemReq.size,
6523  suballocType,
6524  mapped,
6525  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
6526  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
6527  VMA_DEBUG_LOG("    Returned from existing allocation");
6528  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
6529  return VK_SUCCESS;
6530  }
6531  // else: Some allocations must have been touched while we are here. Next try.
6532  }
6533  else
6534  {
6535  // Could not find place in any of the blocks - break outer loop.
6536  break;
6537  }
6538  }
6539  /* Maximum number of tries exceeded - a very unlikely event: many other
6540  threads are simultaneously touching allocations, making it impossible to mark
6541  them as lost at the same time as we try to allocate. */
6542  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
6543  {
6544  return VK_ERROR_TOO_MANY_OBJECTS;
6545  }
6546  }
6547 
6548  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6549 }
6550 
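// Example of the new-block sizing heuristic above (default pools only,
// illustrative numbers): with m_PreferredBlockSize == 256 MiB, an existing
// 128 MiB block and a small request, the pre-shrink loop keeps 256 MiB
// (128 MiB is not strictly larger than the biggest existing block); if
// vkAllocateMemory then fails, CreateBlock is retried at 128, 64 and 32 MiB
// before giving up.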
6551 void VmaBlockVector::Free(
6552  VmaAllocation hAllocation)
6553 {
6554  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
6555 
6556  // Scope for lock.
6557  {
6558  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6559 
6560  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
6561 
6562  if(hAllocation->IsPersistentMap())
6563  {
6564  pBlock->m_Mapping.Unmap(m_hAllocator, pBlock->m_hMemory, 1);
6565  }
6566 
6567  pBlock->m_Metadata.Free(hAllocation);
6568  VMA_HEAVY_ASSERT(pBlock->Validate());
6569 
6570  VMA_DEBUG_LOG("    Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
6571 
6572  // pBlock became empty after this deallocation.
6573  if(pBlock->m_Metadata.IsEmpty())
6574  {
6575  // We already have an empty block. We don't want two, so delete this one.
6576  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
6577  {
6578  pBlockToDelete = pBlock;
6579  Remove(pBlock);
6580  }
6581  // We now have our first empty block.
6582  else
6583  {
6584  m_HasEmptyBlock = true;
6585  }
6586  }
6587  // pBlock didn't become empty, but we have another empty block - find and free that one.
6588  // (This is optional; a heuristic.)
6589  else if(m_HasEmptyBlock)
6590  {
6591  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
6592  if(pLastBlock->m_Metadata.IsEmpty() && m_Blocks.size() > m_MinBlockCount)
6593  {
6594  pBlockToDelete = pLastBlock;
6595  m_Blocks.pop_back();
6596  m_HasEmptyBlock = false;
6597  }
6598  }
6599 
6600  IncrementallySortBlocks();
6601  }
6602 
6603  // Destruction of a free block. Deferred until this point, outside of the mutex
6604  // lock, for performance reasons.
6605  if(pBlockToDelete != VMA_NULL)
6606  {
6607  VMA_DEBUG_LOG(" Deleted empty allocation");
6608  pBlockToDelete->Destroy(m_hAllocator);
6609  vma_delete(m_hAllocator, pBlockToDelete);
6610  }
6611 }
6612 
6613 size_t VmaBlockVector::CalcMaxBlockSize() const
6614 {
6615  size_t result = 0;
6616  for(size_t i = m_Blocks.size(); i--; )
6617  {
6618  result = VMA_MAX((uint64_t)result, (uint64_t)m_Blocks[i]->m_Metadata.GetSize());
6619  if(result >= m_PreferredBlockSize)
6620  {
6621  break;
6622  }
6623  }
6624  return result;
6625 }
6626 
6627 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
6628 {
6629  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6630  {
6631  if(m_Blocks[blockIndex] == pBlock)
6632  {
6633  VmaVectorRemove(m_Blocks, blockIndex);
6634  return;
6635  }
6636  }
6637  VMA_ASSERT(0);
6638 }
6639 
6640 void VmaBlockVector::IncrementallySortBlocks()
6641 {
6642  // Bubble sort only until first swap.
6643  for(size_t i = 1; i < m_Blocks.size(); ++i)
6644  {
6645  if(m_Blocks[i - 1]->m_Metadata.GetSumFreeSize() > m_Blocks[i]->m_Metadata.GetSumFreeSize())
6646  {
6647  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
6648  return;
6649  }
6650  }
6651 }
6652 
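// E.g. with per-block free sizes [5, 3, 8], one call swaps the first unordered
// pair to give [3, 5, 8]; doing at most one swap per call keeps Free() cheap
// while m_Blocks converges to ascending order of free space, which is the
// order preferred by Allocate() above.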
6653 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
6654 {
6655  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
6656  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
6657  allocInfo.allocationSize = blockSize;
6658  VkDeviceMemory mem = VK_NULL_HANDLE;
6659  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
6660  if(res < 0)
6661  {
6662  return res;
6663  }
6664 
6665  // New VkDeviceMemory successfully created.
6666 
6667  // Create a new block object for it.
6668  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
6669  pBlock->Init(
6670  m_MemoryTypeIndex,
6671  mem,
6672  allocInfo.allocationSize);
6673 
6674  m_Blocks.push_back(pBlock);
6675  if(pNewBlockIndex != VMA_NULL)
6676  {
6677  *pNewBlockIndex = m_Blocks.size() - 1;
6678  }
6679 
6680  return VK_SUCCESS;
6681 }
6682 
6683 #if VMA_STATS_STRING_ENABLED
6684 
6685 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
6686 {
6687  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6688 
6689  json.BeginObject();
6690 
6691  if(m_IsCustomPool)
6692  {
6693  json.WriteString("MemoryTypeIndex");
6694  json.WriteNumber(m_MemoryTypeIndex);
6695 
6696  json.WriteString("BlockSize");
6697  json.WriteNumber(m_PreferredBlockSize);
6698 
6699  json.WriteString("BlockCount");
6700  json.BeginObject(true);
6701  if(m_MinBlockCount > 0)
6702  {
6703  json.WriteString("Min");
6704  json.WriteNumber((uint64_t)m_MinBlockCount);
6705  }
6706  if(m_MaxBlockCount < SIZE_MAX)
6707  {
6708  json.WriteString("Max");
6709  json.WriteNumber((uint64_t)m_MaxBlockCount);
6710  }
6711  json.WriteString("Cur");
6712  json.WriteNumber((uint64_t)m_Blocks.size());
6713  json.EndObject();
6714 
6715  if(m_FrameInUseCount > 0)
6716  {
6717  json.WriteString("FrameInUseCount");
6718  json.WriteNumber(m_FrameInUseCount);
6719  }
6720  }
6721  else
6722  {
6723  json.WriteString("PreferredBlockSize");
6724  json.WriteNumber(m_PreferredBlockSize);
6725  }
6726 
6727  json.WriteString("Blocks");
6728  json.BeginArray();
6729  for(size_t i = 0; i < m_Blocks.size(); ++i)
6730  {
6731  m_Blocks[i]->m_Metadata.PrintDetailedMap(json);
6732  }
6733  json.EndArray();
6734 
6735  json.EndObject();
6736 }
6737 
6738 #endif // #if VMA_STATS_STRING_ENABLED
6739 
6740 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
6741  VmaAllocator hAllocator,
6742  uint32_t currentFrameIndex)
6743 {
6744  if(m_pDefragmentator == VMA_NULL)
6745  {
6746  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
6747  hAllocator,
6748  this,
6749  currentFrameIndex);
6750  }
6751 
6752  return m_pDefragmentator;
6753 }
6754 
6755 VkResult VmaBlockVector::Defragment(
6756  VmaDefragmentationStats* pDefragmentationStats,
6757  VkDeviceSize& maxBytesToMove,
6758  uint32_t& maxAllocationsToMove)
6759 {
6760  if(m_pDefragmentator == VMA_NULL)
6761  {
6762  return VK_SUCCESS;
6763  }
6764 
6765  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6766 
6767  // Defragment.
6768  VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);
6769 
6770  // Accumulate statistics.
6771  if(pDefragmentationStats != VMA_NULL)
6772  {
6773  const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
6774  const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
6775  pDefragmentationStats->bytesMoved += bytesMoved;
6776  pDefragmentationStats->allocationsMoved += allocationsMoved;
6777  VMA_ASSERT(bytesMoved <= maxBytesToMove);
6778  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
6779  maxBytesToMove -= bytesMoved;
6780  maxAllocationsToMove -= allocationsMoved;
6781  }
6782 
6783  // Free empty blocks.
6784  m_HasEmptyBlock = false;
6785  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
6786  {
6787  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
6788  if(pBlock->m_Metadata.IsEmpty())
6789  {
6790  if(m_Blocks.size() > m_MinBlockCount)
6791  {
6792  if(pDefragmentationStats != VMA_NULL)
6793  {
6794  ++pDefragmentationStats->deviceMemoryBlocksFreed;
6795  pDefragmentationStats->bytesFreed += pBlock->m_Metadata.GetSize();
6796  }
6797 
6798  VmaVectorRemove(m_Blocks, blockIndex);
6799  pBlock->Destroy(m_hAllocator);
6800  vma_delete(m_hAllocator, pBlock);
6801  }
6802  else
6803  {
6804  m_HasEmptyBlock = true;
6805  }
6806  }
6807  }
6808 
6809  return result;
6810 }
6811 
6812 void VmaBlockVector::DestroyDefragmentator()
6813 {
6814  if(m_pDefragmentator != VMA_NULL)
6815  {
6816  vma_delete(m_hAllocator, m_pDefragmentator);
6817  m_pDefragmentator = VMA_NULL;
6818  }
6819 }
6820 
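// A minimal driving sketch (hypothetical local variables) of the
// defragmentation entry points around here:
//
//   VmaDefragmentator* pDefrag = blockVector.EnsureDefragmentator(hAllocator, frameIndex);
//   VkBool32 changed = VK_FALSE;
//   pDefrag->AddAllocation(hAlloc, &changed);   // repeated for each candidate
//   VkDeviceSize maxBytes = VK_WHOLE_SIZE;
//   uint32_t maxAllocs = UINT32_MAX;
//   blockVector.Defragment(&stats, maxBytes, maxAllocs);
//   blockVector.DestroyDefragmentator();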
6821 void VmaBlockVector::MakePoolAllocationsLost(
6822  uint32_t currentFrameIndex,
6823  size_t* pLostAllocationCount)
6824 {
6825  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6826  size_t lostAllocationCount = 0;
6827  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6828  {
6829  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
6830  VMA_ASSERT(pBlock);
6831  lostAllocationCount += pBlock->m_Metadata.MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
6832  }
6833  if(pLostAllocationCount != VMA_NULL)
6834  {
6835  *pLostAllocationCount = lostAllocationCount;
6836  }
6837 }
6838 
6839 void VmaBlockVector::AddStats(VmaStats* pStats)
6840 {
6841  const uint32_t memTypeIndex = m_MemoryTypeIndex;
6842  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
6843 
6844  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6845 
6846  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6847  {
6848  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
6849  VMA_ASSERT(pBlock);
6850  VMA_HEAVY_ASSERT(pBlock->Validate());
6851  VmaStatInfo allocationStatInfo;
6852  pBlock->m_Metadata.CalcAllocationStatInfo(allocationStatInfo);
6853  VmaAddStatInfo(pStats->total, allocationStatInfo);
6854  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
6855  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
6856  }
6857 }
6858 
6859 ////////////////////////////////////////////////////////////////////////////////
6860 // VmaDefragmentator members definition
6861 
6862 VmaDefragmentator::VmaDefragmentator(
6863  VmaAllocator hAllocator,
6864  VmaBlockVector* pBlockVector,
6865  uint32_t currentFrameIndex) :
6866  m_hAllocator(hAllocator),
6867  m_pBlockVector(pBlockVector),
6868  m_CurrentFrameIndex(currentFrameIndex),
6869  m_BytesMoved(0),
6870  m_AllocationsMoved(0),
6871  m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
6872  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
6873 {
6874 }
6875 
6876 VmaDefragmentator::~VmaDefragmentator()
6877 {
6878  for(size_t i = m_Blocks.size(); i--; )
6879  {
6880  vma_delete(m_hAllocator, m_Blocks[i]);
6881  }
6882 }
6883 
6884 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
6885 {
6886  AllocationInfo allocInfo;
6887  allocInfo.m_hAllocation = hAlloc;
6888  allocInfo.m_pChanged = pChanged;
6889  m_Allocations.push_back(allocInfo);
6890 }
6891 
6892 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
6893 {
6894  // It has already been mapped for defragmentation.
6895  if(m_pMappedDataForDefragmentation)
6896  {
6897  *ppMappedData = m_pMappedDataForDefragmentation;
6898  return VK_SUCCESS;
6899  }
6900 
6901  // It is originally mapped.
6902  if(m_pBlock->m_Mapping.GetMappedData())
6903  {
6904  *ppMappedData = m_pBlock->m_Mapping.GetMappedData();
6905  return VK_SUCCESS;
6906  }
6907 
6908  // Map on first usage.
6909  VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
6910  *ppMappedData = m_pMappedDataForDefragmentation;
6911  return res;
6912 }
6913 
6914 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
6915 {
6916  if(m_pMappedDataForDefragmentation != VMA_NULL)
6917  {
6918  m_pBlock->Unmap(hAllocator, 1);
6919  }
6920 }
6921 
6922 VkResult VmaDefragmentator::DefragmentRound(
6923  VkDeviceSize maxBytesToMove,
6924  uint32_t maxAllocationsToMove)
6925 {
6926  if(m_Blocks.empty())
6927  {
6928  return VK_SUCCESS;
6929  }
6930 
6931  size_t srcBlockIndex = m_Blocks.size() - 1;
6932  size_t srcAllocIndex = SIZE_MAX;
6933  for(;;)
6934  {
6935  // 1. Find next allocation to move.
6936  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
6937  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
6938  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
6939  {
6940  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
6941  {
6942  // Finished: no more allocations to process.
6943  if(srcBlockIndex == 0)
6944  {
6945  return VK_SUCCESS;
6946  }
6947  else
6948  {
6949  --srcBlockIndex;
6950  srcAllocIndex = SIZE_MAX;
6951  }
6952  }
6953  else
6954  {
6955  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
6956  }
6957  }
6958 
6959  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
6960  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
6961 
6962  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
6963  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
6964  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
6965  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
6966 
6967  // 2. Try to find new place for this allocation in preceding or current block.
6968  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
6969  {
6970  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
6971  VmaAllocationRequest dstAllocRequest;
6972  if(pDstBlockInfo->m_pBlock->m_Metadata.CreateAllocationRequest(
6973  m_CurrentFrameIndex,
6974  m_pBlockVector->GetFrameInUseCount(),
6975  m_pBlockVector->GetBufferImageGranularity(),
6976  size,
6977  alignment,
6978  suballocType,
6979  false, // canMakeOtherLost
6980  &dstAllocRequest) &&
6981  MoveMakesSense(
6982  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
6983  {
6984  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
6985 
6986  // Reached limit on number of allocations or bytes to move.
6987  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
6988  (m_BytesMoved + size > maxBytesToMove))
6989  {
6990  return VK_INCOMPLETE;
6991  }
6992 
6993  void* pDstMappedData = VMA_NULL;
6994  VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
6995  if(res != VK_SUCCESS)
6996  {
6997  return res;
6998  }
6999 
7000  void* pSrcMappedData = VMA_NULL;
7001  res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
7002  if(res != VK_SUCCESS)
7003  {
7004  return res;
7005  }
7006 
7007  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
7008  memcpy(
7009  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
7010  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
7011  static_cast<size_t>(size));
7012 
7013  pDstBlockInfo->m_pBlock->m_Metadata.Alloc(dstAllocRequest, suballocType, size, allocInfo.m_hAllocation);
7014  pSrcBlockInfo->m_pBlock->m_Metadata.FreeAtOffset(srcOffset);
7015 
7016  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
7017 
7018  if(allocInfo.m_pChanged != VMA_NULL)
7019  {
7020  *allocInfo.m_pChanged = VK_TRUE;
7021  }
7022 
7023  ++m_AllocationsMoved;
7024  m_BytesMoved += size;
7025 
7026  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
7027 
7028  break;
7029  }
7030  }
7031 
7032  // If not processed, this allocInfo remains in pSrcBlockInfo->m_Allocations for the next round.
7033 
7034  if(srcAllocIndex > 0)
7035  {
7036  --srcAllocIndex;
7037  }
7038  else
7039  {
7040  if(srcBlockIndex > 0)
7041  {
7042  --srcBlockIndex;
7043  srcAllocIndex = SIZE_MAX;
7044  }
7045  else
7046  {
7047  return VK_SUCCESS;
7048  }
7049  }
7050  }
7051 }
7052 
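// Illustration of one round above: with m_Blocks sorted most-"destination"
// first, e.g. [B0, B1, B2], allocations are taken from B2 backwards (largest
// first) and re-placed into the earliest block - and lowest offset - where
// CreateAllocationRequest() succeeds and MoveMakesSense() holds, copying the
// bytes through the mapped pointers.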
7053 VkResult VmaDefragmentator::Defragment(
7054  VkDeviceSize maxBytesToMove,
7055  uint32_t maxAllocationsToMove)
7056 {
7057  if(m_Allocations.empty())
7058  {
7059  return VK_SUCCESS;
7060  }
7061 
7062  // Create block info for each block.
7063  const size_t blockCount = m_pBlockVector->m_Blocks.size();
7064  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
7065  {
7066  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
7067  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
7068  m_Blocks.push_back(pBlockInfo);
7069  }
7070 
7071  // Sort them by m_pBlock pointer value.
7072  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
7073 
7074  // Move each allocation info from m_Allocations to the m_Allocations list of the block it belongs to.
7075  for(size_t allocIndex = 0, allocCount = m_Allocations.size(); allocIndex < allocCount; ++allocIndex)
7076  {
7077  AllocationInfo& allocInfo = m_Allocations[allocIndex];
7078  // Now that we are inside VmaBlockVector::m_Mutex, we can make a final check that this allocation was not lost.
7079  if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
7080  {
7081  VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
7082  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
7083  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
7084  {
7085  (*it)->m_Allocations.push_back(allocInfo);
7086  }
7087  else
7088  {
7089  VMA_ASSERT(0);
7090  }
7091  }
7092  }
7093  m_Allocations.clear();
7094 
7095  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
7096  {
7097  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
7098  pBlockInfo->CalcHasNonMovableAllocations();
7099  pBlockInfo->SortAllocationsBySizeDescecnding();
7100  }
7101 
7102  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
7103  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
7104 
7105  // Execute defragmentation rounds (the main part).
7106  VkResult result = VK_SUCCESS;
7107  for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
7108  {
7109  result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
7110  }
7111 
7112  // Unmap blocks that were mapped for defragmentation.
7113  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
7114  {
7115  m_Blocks[blockIndex]->Unmap(m_hAllocator);
7116  }
7117 
7118  return result;
7119 }
7120 
7121 bool VmaDefragmentator::MoveMakesSense(
7122  size_t dstBlockIndex, VkDeviceSize dstOffset,
7123  size_t srcBlockIndex, VkDeviceSize srcOffset)
7124 {
7125  if(dstBlockIndex < srcBlockIndex)
7126  {
7127  return true;
7128  }
7129  if(dstBlockIndex > srcBlockIndex)
7130  {
7131  return false;
7132  }
7133  if(dstOffset < srcOffset)
7134  {
7135  return true;
7136  }
7137  return false;
7138 }
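
// Equivalent, more compact formulation of MoveMakesSense(): the move makes
// sense iff the destination position precedes the source lexicographically:
//   return (dstBlockIndex < srcBlockIndex) ||
//          (dstBlockIndex == srcBlockIndex && dstOffset < srcOffset);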
7139 
7140 ////////////////////////////////////////////////////////////////////////////////
7141 // VmaAllocator_T
7142 
7143 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
7144  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
7145  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
7146  m_hDevice(pCreateInfo->device),
7147  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
7148  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
7149  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
7150  m_PreferredLargeHeapBlockSize(0),
7151  m_PhysicalDevice(pCreateInfo->physicalDevice),
7152  m_CurrentFrameIndex(0),
7153  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks()))
7154 {
7155  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
7156 
7157  memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
7158  memset(&m_MemProps, 0, sizeof(m_MemProps));
7159  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
7160 
7161  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
7162  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
7163 
7164  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
7165  {
7166  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
7167  }
7168 
7169  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
7170  {
7171  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
7172  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
7173  }
7174 
7175  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
7176 
7177  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
7178  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
7179 
7180  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
7181  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
7182 
7183  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
7184  {
7185  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
7186  {
7187  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
7188  if(limit != VK_WHOLE_SIZE)
7189  {
7190  m_HeapSizeLimit[heapIndex] = limit;
7191  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
7192  {
7193  m_MemProps.memoryHeaps[heapIndex].size = limit;
7194  }
7195  }
7196  }
7197  }
7198 
7199  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7200  {
7201  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
7202 
7203  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
7204  this,
7205  memTypeIndex,
7206  preferredBlockSize,
7207  0,
7208  SIZE_MAX,
7209  GetBufferImageGranularity(),
7210  pCreateInfo->frameInUseCount,
7211  false); // isCustomPool
7212  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
7213  // because minBlockCount is 0.
7214  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
7215  }
7216 }
7217 
7218 VmaAllocator_T::~VmaAllocator_T()
7219 {
7220  VMA_ASSERT(m_Pools.empty());
7221 
7222  for(size_t i = GetMemoryTypeCount(); i--; )
7223  {
7224  vma_delete(this, m_pDedicatedAllocations[i]);
7225  vma_delete(this, m_pBlockVectors[i]);
7226  }
7227 }
7228 
7229 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
7230 {
7231 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
7232  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
7233  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
7234  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
7235  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
7236  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
7237  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
7238  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
7239  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
7240  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
7241  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
7242  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
7243  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
7244  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
7245  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
7246  if(m_UseKhrDedicatedAllocation)
7247  {
7248  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
7249  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
7250  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
7251  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
7252  }
7253 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
7254 
7255 #define VMA_COPY_IF_NOT_NULL(funcName) \
7256  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
7257 
7258  if(pVulkanFunctions != VMA_NULL)
7259  {
7260  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
7261  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
7262  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
7263  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
7264  VMA_COPY_IF_NOT_NULL(vkMapMemory);
7265  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
7266  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
7267  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
7268  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
7269  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
7270  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
7271  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
7272  VMA_COPY_IF_NOT_NULL(vkCreateImage);
7273  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
7274  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
7275  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
7276  }
7277 
7278 #undef VMA_COPY_IF_NOT_NULL
7279 
7280  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
7281  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
7282  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
7283  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
7284  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
7285  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
7286  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
7287  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
7288  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
7289  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
7290  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
7291  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
7292  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
7293  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
7294  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
7295  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
7296  if(m_UseKhrDedicatedAllocation)
7297  {
7298  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
7299  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
7300  }
7301 }
7302 
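// A sketch of supplying custom pointers instead of relying on the static
// vulkan.h symbols above (assumes a valid VkDevice `device`; only one member
// is shown, the rest are filled the same way):
//
//   VmaVulkanFunctions vulkanFunctions = {};
//   vulkanFunctions.vkAllocateMemory =
//       (PFN_vkAllocateMemory)vkGetDeviceProcAddr(device, "vkAllocateMemory");
//   VmaAllocatorCreateInfo createInfo = {};
//   createInfo.pVulkanFunctions = &vulkanFunctions;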
7303 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
7304 {
7305  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
7306  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
7307  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
7308  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
7309 }
7310 
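// Worked example, assuming the default VMA_SMALL_HEAP_MAX_SIZE of 512 MiB:
// a 256 MiB heap counts as "small", so its preferred block size is
// 256 MiB / 8 = 32 MiB; an 8 GiB heap is not, so it uses
// m_PreferredLargeHeapBlockSize unchanged.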
7311 VkResult VmaAllocator_T::AllocateMemoryOfType(
7312  const VkMemoryRequirements& vkMemReq,
7313  bool dedicatedAllocation,
7314  VkBuffer dedicatedBuffer,
7315  VkImage dedicatedImage,
7316  const VmaAllocationCreateInfo& createInfo,
7317  uint32_t memTypeIndex,
7318  VmaSuballocationType suballocType,
7319  VmaAllocation* pAllocation)
7320 {
7321  VMA_ASSERT(pAllocation != VMA_NULL);
7322  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
7323 
7324  VmaAllocationCreateInfo finalCreateInfo = createInfo;
7325 
7326  // If memory type is not HOST_VISIBLE, disable MAPPED.
7327  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
7328  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
7329  {
7330  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
7331  }
7332 
7333  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
7334  VMA_ASSERT(blockVector);
7335 
7336  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
7337  bool preferDedicatedMemory =
7338  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
7339  dedicatedAllocation ||
7340  // Heuristics: Allocate dedicated memory if requested size is greater than half of preferred block size.
7341  vkMemReq.size > preferredBlockSize / 2;
7342 
7343  if(preferDedicatedMemory &&
7344  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
7345  finalCreateInfo.pool == VK_NULL_HANDLE)
7346  {
7347  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
7348  }
7349 
7350  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
7351  {
7352  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7353  {
7354  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7355  }
7356  else
7357  {
7358  return AllocateDedicatedMemory(
7359  vkMemReq.size,
7360  suballocType,
7361  memTypeIndex,
7362  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
7363  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
7364  finalCreateInfo.pUserData,
7365  dedicatedBuffer,
7366  dedicatedImage,
7367  pAllocation);
7368  }
7369  }
7370  else
7371  {
7372  VkResult res = blockVector->Allocate(
7373  VK_NULL_HANDLE, // hCurrentPool
7374  m_CurrentFrameIndex.load(),
7375  vkMemReq,
7376  finalCreateInfo,
7377  suballocType,
7378  pAllocation);
7379  if(res == VK_SUCCESS)
7380  {
7381  return res;
7382  }
7383 
7384  // Allocation from block vector failed. Try dedicated memory.
7385  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7386  {
7387  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7388  }
7389  else
7390  {
7391  res = AllocateDedicatedMemory(
7392  vkMemReq.size,
7393  suballocType,
7394  memTypeIndex,
7395  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
7396  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
7397  finalCreateInfo.pUserData,
7398  dedicatedBuffer,
7399  dedicatedImage,
7400  pAllocation);
7401  if(res == VK_SUCCESS)
7402  {
7403  // Succeeded: AllocateDedicatedMemory function already filled *pAllocation, nothing more to do here.
7404  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
7405  return VK_SUCCESS;
7406  }
7407  else
7408  {
7409  // Everything failed: Return error code.
7410  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
7411  return res;
7412  }
7413  }
7414  }
7415 }
7416 
7417 VkResult VmaAllocator_T::AllocateDedicatedMemory(
7418  VkDeviceSize size,
7419  VmaSuballocationType suballocType,
7420  uint32_t memTypeIndex,
7421  bool map,
7422  bool isUserDataString,
7423  void* pUserData,
7424  VkBuffer dedicatedBuffer,
7425  VkImage dedicatedImage,
7426  VmaAllocation* pAllocation)
7427 {
7428  VMA_ASSERT(pAllocation);
7429 
7430  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
7431  allocInfo.memoryTypeIndex = memTypeIndex;
7432  allocInfo.allocationSize = size;
7433 
7434  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
7435  if(m_UseKhrDedicatedAllocation)
7436  {
7437  if(dedicatedBuffer != VK_NULL_HANDLE)
7438  {
7439  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
7440  dedicatedAllocInfo.buffer = dedicatedBuffer;
7441  allocInfo.pNext = &dedicatedAllocInfo;
7442  }
7443  else if(dedicatedImage != VK_NULL_HANDLE)
7444  {
7445  dedicatedAllocInfo.image = dedicatedImage;
7446  allocInfo.pNext = &dedicatedAllocInfo;
7447  }
7448  }
7449 
7450  // Allocate VkDeviceMemory.
7451  VkDeviceMemory hMemory = VK_NULL_HANDLE;
7452  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
7453  if(res < 0)
7454  {
7455  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
7456  return res;
7457  }
7458 
7459  void* pMappedData = VMA_NULL;
7460  if(map)
7461  {
7462  res = (*m_VulkanFunctions.vkMapMemory)(
7463  m_hDevice,
7464  hMemory,
7465  0,
7466  VK_WHOLE_SIZE,
7467  0,
7468  &pMappedData);
7469  if(res < 0)
7470  {
7471  VMA_DEBUG_LOG(" vkMapMemory FAILED");
7472  FreeVulkanMemory(memTypeIndex, size, hMemory);
7473  return res;
7474  }
7475  }
7476 
7477  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
7478  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
7479  (*pAllocation)->SetUserData(this, pUserData);
7480 
7481  // Register it in m_pDedicatedAllocations.
7482  {
7483  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
7484  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
7485  VMA_ASSERT(pDedicatedAllocations);
7486  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
7487  }
7488 
7489  VMA_DEBUG_LOG(" Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);
7490 
7491  return VK_SUCCESS;
7492 }
7493 
7494 void VmaAllocator_T::GetBufferMemoryRequirements(
7495  VkBuffer hBuffer,
7496  VkMemoryRequirements& memReq,
7497  bool& requiresDedicatedAllocation,
7498  bool& prefersDedicatedAllocation) const
7499 {
7500  if(m_UseKhrDedicatedAllocation)
7501  {
7502  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
7503  memReqInfo.buffer = hBuffer;
7504 
7505  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
7506 
7507  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
7508  memReq2.pNext = &memDedicatedReq;
7509 
7510  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
7511 
7512  memReq = memReq2.memoryRequirements;
7513  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
7514  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
7515  }
7516  else
7517  {
7518  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
7519  requiresDedicatedAllocation = false;
7520  prefersDedicatedAllocation = false;
7521  }
7522 }
7523 
7524 void VmaAllocator_T::GetImageMemoryRequirements(
7525  VkImage hImage,
7526  VkMemoryRequirements& memReq,
7527  bool& requiresDedicatedAllocation,
7528  bool& prefersDedicatedAllocation) const
7529 {
7530  if(m_UseKhrDedicatedAllocation)
7531  {
7532  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
7533  memReqInfo.image = hImage;
7534 
7535  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
7536 
7537  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
7538  memReq2.pNext = &memDedicatedReq;
7539 
7540  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
7541 
7542  memReq = memReq2.memoryRequirements;
7543  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
7544  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
7545  }
7546  else
7547  {
7548  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
7549  requiresDedicatedAllocation = false;
7550  prefersDedicatedAllocation = false;
7551  }
7552 }
7553 
7554 VkResult VmaAllocator_T::AllocateMemory(
7555  const VkMemoryRequirements& vkMemReq,
7556  bool requiresDedicatedAllocation,
7557  bool prefersDedicatedAllocation,
7558  VkBuffer dedicatedBuffer,
7559  VkImage dedicatedImage,
7560  const VmaAllocationCreateInfo& createInfo,
7561  VmaSuballocationType suballocType,
7562  VmaAllocation* pAllocation)
7563 {
7564  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
7565  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7566  {
7567  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
7568  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7569  }
7570  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
7571  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
7572  {
7573  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
7574  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7575  }
7576  if(requiresDedicatedAllocation)
7577  {
7578  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7579  {
7580  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
7581  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7582  }
7583  if(createInfo.pool != VK_NULL_HANDLE)
7584  {
7585  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
7586  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7587  }
7588  }
7589  if((createInfo.pool != VK_NULL_HANDLE) &&
7590  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
7591  {
7592  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
7593  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7594  }
7595 
7596  if(createInfo.pool != VK_NULL_HANDLE)
7597  {
7598  return createInfo.pool->m_BlockVector.Allocate(
7599  createInfo.pool,
7600  m_CurrentFrameIndex.load(),
7601  vkMemReq,
7602  createInfo,
7603  suballocType,
7604  pAllocation);
7605  }
7606  else
7607  {
7608  // Bit mask of Vulkan memory types acceptable for this allocation.
7609  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
7610  uint32_t memTypeIndex = UINT32_MAX;
7611  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
7612  if(res == VK_SUCCESS)
7613  {
7614  res = AllocateMemoryOfType(
7615  vkMemReq,
7616  requiresDedicatedAllocation || prefersDedicatedAllocation,
7617  dedicatedBuffer,
7618  dedicatedImage,
7619  createInfo,
7620  memTypeIndex,
7621  suballocType,
7622  pAllocation);
7623  // Succeeded on first try.
7624  if(res == VK_SUCCESS)
7625  {
7626  return res;
7627  }
7628  // Allocation from this memory type failed. Try other compatible memory types.
7629  else
7630  {
7631  for(;;)
7632  {
7633  // Remove old memTypeIndex from list of possibilities.
7634  memoryTypeBits &= ~(1u << memTypeIndex);
7635  // Find alternative memTypeIndex.
7636  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
7637  if(res == VK_SUCCESS)
7638  {
7639  res = AllocateMemoryOfType(
7640  vkMemReq,
7641  requiresDedicatedAllocation || prefersDedicatedAllocation,
7642  dedicatedBuffer,
7643  dedicatedImage,
7644  createInfo,
7645  memTypeIndex,
7646  suballocType,
7647  pAllocation);
7648  // Allocation from this alternative memory type succeeded.
7649  if(res == VK_SUCCESS)
7650  {
7651  return res;
7652  }
7653  // else: Allocation from this memory type failed. Try next one - next loop iteration.
7654  }
7655  // No other matching memory type index could be found.
7656  else
7657  {
7658  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
7659  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7660  }
7661  }
7662  }
7663  }
7664  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
7665  else
7666  return res;
7667  }
7668 }
7669 
7670 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
7671 {
7672  VMA_ASSERT(allocation);
7673 
7674  if(allocation->CanBecomeLost() == false ||
7675  allocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
7676  {
7677  switch(allocation->GetType())
7678  {
7679  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
7680  {
7681  VmaBlockVector* pBlockVector = VMA_NULL;
7682  VmaPool hPool = allocation->GetPool();
7683  if(hPool != VK_NULL_HANDLE)
7684  {
7685  pBlockVector = &hPool->m_BlockVector;
7686  }
7687  else
7688  {
7689  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
7690  pBlockVector = m_pBlockVectors[memTypeIndex];
7691  }
7692  pBlockVector->Free(allocation);
7693  }
7694  break;
7695  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
7696  FreeDedicatedMemory(allocation);
7697  break;
7698  default:
7699  VMA_ASSERT(0);
7700  }
7701  }
7702 
7703  allocation->SetUserData(this, VMA_NULL);
7704  vma_delete(this, allocation);
7705 }
7706 
7707 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
7708 {
7709  // Initialize.
7710  InitStatInfo(pStats->total);
7711  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
7712  InitStatInfo(pStats->memoryType[i]);
7713  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
7714  InitStatInfo(pStats->memoryHeap[i]);
7715 
7716  // Process default pools.
7717  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7718  {
7719  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
7720  VMA_ASSERT(pBlockVector);
7721  pBlockVector->AddStats(pStats);
7722  }
7723 
7724  // Process custom pools.
7725  {
7726  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7727  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
7728  {
7729  m_Pools[poolIndex]->GetBlockVector().AddStats(pStats);
7730  }
7731  }
7732 
7733  // Process dedicated allocations.
7734  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7735  {
7736  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
7737  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
7738  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
7739  VMA_ASSERT(pDedicatedAllocVector);
7740  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
7741  {
7742  VmaStatInfo allocationStatInfo;
7743  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
7744  VmaAddStatInfo(pStats->total, allocationStatInfo);
7745  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
7746  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
7747  }
7748  }
7749 
7750  // Postprocess.
7751  VmaPostprocessCalcStatInfo(pStats->total);
7752  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
7753  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
7754  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
7755  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
7756 }
7757 
7758 static const uint32_t VMA_VENDOR_ID_AMD = 4098; // 0x1002 - PCI vendor ID of AMD.
7759 
7760 VkResult VmaAllocator_T::Defragment(
7761  VmaAllocation* pAllocations,
7762  size_t allocationCount,
7763  VkBool32* pAllocationsChanged,
7764  const VmaDefragmentationInfo* pDefragmentationInfo,
7765  VmaDefragmentationStats* pDefragmentationStats)
7766 {
7767  if(pAllocationsChanged != VMA_NULL)
7768  {
7769  memset(pAllocationsChanged, 0, allocationCount * sizeof(*pAllocationsChanged));
7770  }
7771  if(pDefragmentationStats != VMA_NULL)
7772  {
7773  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
7774  }
7775 
7776  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
7777 
7778  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
7779 
7780  const size_t poolCount = m_Pools.size();
7781 
7782  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
7783  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
7784  {
7785  VmaAllocation hAlloc = pAllocations[allocIndex];
7786  VMA_ASSERT(hAlloc);
7787  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
7788  // DedicatedAlloc cannot be defragmented.
7789  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
7790  // Only HOST_VISIBLE memory types can be defragmented.
7791  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) &&
7792  // Lost allocation cannot be defragmented.
7793  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
7794  {
7795  VmaBlockVector* pAllocBlockVector = VMA_NULL;
7796 
7797  const VmaPool hAllocPool = hAlloc->GetPool();
7798  // This allocation belongs to custom pool.
7799  if(hAllocPool != VK_NULL_HANDLE)
7800  {
7801  pAllocBlockVector = &hAllocPool->GetBlockVector();
7802  }
7803  // This allocation belongs to general pool.
7804  else
7805  {
7806  pAllocBlockVector = m_pBlockVectors[memTypeIndex];
7807  }
7808 
7809  VmaDefragmentator* const pDefragmentator = pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
7810 
7811  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
7812  &pAllocationsChanged[allocIndex] : VMA_NULL;
7813  pDefragmentator->AddAllocation(hAlloc, pChanged);
7814  }
7815  }
7816 
7817  VkResult result = VK_SUCCESS;
7818 
7819  // ======== Main processing.
7820 
7821  VkDeviceSize maxBytesToMove = SIZE_MAX;
7822  uint32_t maxAllocationsToMove = UINT32_MAX;
7823  if(pDefragmentationInfo != VMA_NULL)
7824  {
7825  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
7826  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
7827  }
7828 
7829  // Process standard memory.
7830  for(uint32_t memTypeIndex = 0;
7831  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
7832  ++memTypeIndex)
7833  {
7834  // Only HOST_VISIBLE memory types can be defragmented.
7835  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
7836  {
7837  result = m_pBlockVectors[memTypeIndex]->Defragment(
7838  pDefragmentationStats,
7839  maxBytesToMove,
7840  maxAllocationsToMove);
7841  }
7842  }
7843 
7844  // Process custom pools.
7845  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
7846  {
7847  result = m_Pools[poolIndex]->GetBlockVector().Defragment(
7848  pDefragmentationStats,
7849  maxBytesToMove,
7850  maxAllocationsToMove);
7851  }
7852 
7853  // ======== Destroy defragmentators.
7854 
7855  // Process custom pools.
7856  for(size_t poolIndex = poolCount; poolIndex--; )
7857  {
7858  m_Pools[poolIndex]->GetBlockVector().DestroyDefragmentator();
7859  }
7860 
7861  // Process standard memory.
7862  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
7863  {
7864  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
7865  {
7866  m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
7867  }
7868  }
7869 
7870  return result;
7871 }
7872 
7873 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
7874 {
7875  if(hAllocation->CanBecomeLost())
7876  {
7877  /*
7878  Warning: This is a carefully designed algorithm.
7879  Do not modify unless you really know what you're doing :)
7880  */
7881  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
7882  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
7883  for(;;)
7884  {
7885  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
7886  {
7887  pAllocationInfo->memoryType = UINT32_MAX;
7888  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
7889  pAllocationInfo->offset = 0;
7890  pAllocationInfo->size = hAllocation->GetSize();
7891  pAllocationInfo->pMappedData = VMA_NULL;
7892  pAllocationInfo->pUserData = hAllocation->GetUserData();
7893  return;
7894  }
7895  else if(localLastUseFrameIndex == localCurrFrameIndex)
7896  {
7897  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
7898  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
7899  pAllocationInfo->offset = hAllocation->GetOffset();
7900  pAllocationInfo->size = hAllocation->GetSize();
7901  pAllocationInfo->pMappedData = VMA_NULL;
7902  pAllocationInfo->pUserData = hAllocation->GetUserData();
7903  return;
7904  }
7905  else // Last use time earlier than current time.
7906  {
7907  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
7908  {
7909  localLastUseFrameIndex = localCurrFrameIndex;
7910  }
7911  }
7912  }
7913  }
7914  else
7915  {
7916  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
7917  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
7918  pAllocationInfo->offset = hAllocation->GetOffset();
7919  pAllocationInfo->size = hAllocation->GetSize();
7920  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
7921  pAllocationInfo->pUserData = hAllocation->GetUserData();
7922  }
7923 }
7924 
7925 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
7926 {
7927  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
7928  if(hAllocation->CanBecomeLost())
7929  {
7930  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
7931  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
7932  for(;;)
7933  {
7934  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
7935  {
7936  return false;
7937  }
7938  else if(localLastUseFrameIndex == localCurrFrameIndex)
7939  {
7940  return true;
7941  }
7942  else // Last use time earlier than current time.
7943  {
7944  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
7945  {
7946  localLastUseFrameIndex = localCurrFrameIndex;
7947  }
7948  }
7949  }
7950  }
7951  else
7952  {
7953  return true;
7954  }
7955 }
7956 
7957 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
7958 {
7959  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u", pCreateInfo->memoryTypeIndex);
7960 
7961  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
7962 
7963  if(newCreateInfo.maxBlockCount == 0)
7964  {
7965  newCreateInfo.maxBlockCount = SIZE_MAX;
7966  }
7967  if(newCreateInfo.blockSize == 0)
7968  {
7969  newCreateInfo.blockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
7970  }
7971 
7972  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo);
7973 
7974  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
7975  if(res != VK_SUCCESS)
7976  {
7977  vma_delete(this, *pPool);
7978  *pPool = VMA_NULL;
7979  return res;
7980  }
7981 
7982  // Add to m_Pools.
7983  {
7984  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7985  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
7986  }
7987 
7988  return VK_SUCCESS;
7989 }
7990 
7991 void VmaAllocator_T::DestroyPool(VmaPool pool)
7992 {
7993  // Remove from m_Pools.
7994  {
7995  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7996  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
7997  VMA_ASSERT(success && "Pool not found in Allocator.");
7998  }
7999 
8000  vma_delete(this, pool);
8001 }
8002 
8003 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
8004 {
8005  pool->m_BlockVector.GetPoolStats(pPoolStats);
8006 }
8007 
8008 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
8009 {
8010  m_CurrentFrameIndex.store(frameIndex);
8011 }
8012 
8013 void VmaAllocator_T::MakePoolAllocationsLost(
8014  VmaPool hPool,
8015  size_t* pLostAllocationCount)
8016 {
8017  hPool->m_BlockVector.MakePoolAllocationsLost(
8018  m_CurrentFrameIndex.load(),
8019  pLostAllocationCount);
8020 }
8021 
8022 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
8023 {
8024  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
8025  (*pAllocation)->InitLost();
8026 }
8027 
8028 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
8029 {
8030  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
8031 
8032  VkResult res;
8033  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
8034  {
8035  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
8036  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
8037  {
8038  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
8039  if(res == VK_SUCCESS)
8040  {
8041  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
8042  }
8043  }
8044  else
8045  {
8046  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
8047  }
8048  }
8049  else
8050  {
8051  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
8052  }
8053 
8054  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
8055  {
8056  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
8057  }
8058 
8059  return res;
8060 }
8061 
8062 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
8063 {
8064  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
8065  {
8066  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
8067  }
8068 
8069  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
8070 
8071  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
8072  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
8073  {
8074  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
8075  m_HeapSizeLimit[heapIndex] += size;
8076  }
8077 }
8078 
8079 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
8080 {
8081  if(hAllocation->CanBecomeLost())
8082  {
8083  return VK_ERROR_MEMORY_MAP_FAILED;
8084  }
8085 
8086  switch(hAllocation->GetType())
8087  {
8088  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
8089  {
8090  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
8091  char *pBytes = VMA_NULL;
8092  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
8093  if(res == VK_SUCCESS)
8094  {
8095  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
8096  hAllocation->BlockAllocMap();
8097  }
8098  return res;
8099  }
8100  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
8101  return hAllocation->DedicatedAllocMap(this, ppData);
8102  default:
8103  VMA_ASSERT(0);
8104  return VK_ERROR_MEMORY_MAP_FAILED;
8105  }
8106 }
8107 
8108 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
8109 {
8110  switch(hAllocation->GetType())
8111  {
8112  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
8113  {
8114  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
8115  hAllocation->BlockAllocUnmap();
8116  pBlock->Unmap(this, 1);
8117  }
8118  break;
8119  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
8120  hAllocation->DedicatedAllocUnmap(this);
8121  break;
8122  default:
8123  VMA_ASSERT(0);
8124  }
8125 }
8126 
8127 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
8128 {
8129  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
8130 
8131  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
8132  {
8133  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
8134  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
8135  VMA_ASSERT(pDedicatedAllocations);
8136  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
8137  VMA_ASSERT(success);
8138  }
8139 
8140  VkDeviceMemory hMemory = allocation->GetMemory();
8141 
8142  if(allocation->GetMappedData() != VMA_NULL)
8143  {
8144  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
8145  }
8146 
8147  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
8148 
8149  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
8150 }
8151 
8152 #if VMA_STATS_STRING_ENABLED
8153 
8154 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
8155 {
8156  bool dedicatedAllocationsStarted = false;
8157  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
8158  {
8159  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
8160  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
8161  VMA_ASSERT(pDedicatedAllocVector);
8162  if(pDedicatedAllocVector->empty() == false)
8163  {
8164  if(dedicatedAllocationsStarted == false)
8165  {
8166  dedicatedAllocationsStarted = true;
8167  json.WriteString("DedicatedAllocations");
8168  json.BeginObject();
8169  }
8170 
8171  json.BeginString("Type ");
8172  json.ContinueString(memTypeIndex);
8173  json.EndString();
8174 
8175  json.BeginArray();
8176 
8177  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
8178  {
8179  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
8180  json.BeginObject(true);
8181 
8182  json.WriteString("Type");
8183  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[hAlloc->GetSuballocationType()]);
8184 
8185  json.WriteString("Size");
8186  json.WriteNumber(hAlloc->GetSize());
8187 
8188  const void* pUserData = hAlloc->GetUserData();
8189  if(pUserData != VMA_NULL)
8190  {
8191  json.WriteString("UserData");
8192  if(hAlloc->IsUserDataString())
8193  {
8194  json.WriteString((const char*)pUserData);
8195  }
8196  else
8197  {
8198  json.BeginString();
8199  json.ContinueString_Pointer(pUserData);
8200  json.EndString();
8201  }
8202  }
8203 
8204  json.EndObject();
8205  }
8206 
8207  json.EndArray();
8208  }
8209  }
8210  if(dedicatedAllocationsStarted)
8211  {
8212  json.EndObject();
8213  }
8214 
8215  {
8216  bool allocationsStarted = false;
8217  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
8218  {
8219  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
8220  {
8221  if(allocationsStarted == false)
8222  {
8223  allocationsStarted = true;
8224  json.WriteString("DefaultPools");
8225  json.BeginObject();
8226  }
8227 
8228  json.BeginString("Type ");
8229  json.ContinueString(memTypeIndex);
8230  json.EndString();
8231 
8232  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
8233  }
8234  }
8235  if(allocationsStarted)
8236  {
8237  json.EndObject();
8238  }
8239  }
8240 
8241  {
8242  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
8243  const size_t poolCount = m_Pools.size();
8244  if(poolCount > 0)
8245  {
8246  json.WriteString("Pools");
8247  json.BeginArray();
8248  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
8249  {
8250  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
8251  }
8252  json.EndArray();
8253  }
8254  }
8255 }
8256 
8257 #endif // #if VMA_STATS_STRING_ENABLED
8258 
8259 static VkResult AllocateMemoryForImage(
8260  VmaAllocator allocator,
8261  VkImage image,
8262  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8263  VmaSuballocationType suballocType,
8264  VmaAllocation* pAllocation)
8265 {
8266  VMA_ASSERT(allocator && (image != VK_NULL_HANDLE) && pAllocationCreateInfo && pAllocation);
8267 
8268  VkMemoryRequirements vkMemReq = {};
8269  bool requiresDedicatedAllocation = false;
8270  bool prefersDedicatedAllocation = false;
8271  allocator->GetImageMemoryRequirements(image, vkMemReq,
8272  requiresDedicatedAllocation, prefersDedicatedAllocation);
8273 
8274  return allocator->AllocateMemory(
8275  vkMemReq,
8276  requiresDedicatedAllocation,
8277  prefersDedicatedAllocation,
8278  VK_NULL_HANDLE, // dedicatedBuffer
8279  image, // dedicatedImage
8280  *pAllocationCreateInfo,
8281  suballocType,
8282  pAllocation);
8283 }
8284 
8285 ////////////////////////////////////////////////////////////////////////////////
8286 // Public interface
8287 
8288 VkResult vmaCreateAllocator(
8289  const VmaAllocatorCreateInfo* pCreateInfo,
8290  VmaAllocator* pAllocator)
8291 {
8292  VMA_ASSERT(pCreateInfo && pAllocator);
8293  VMA_DEBUG_LOG("vmaCreateAllocator");
8294  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
8295  return VK_SUCCESS;
8296 }
8297 
8298 void vmaDestroyAllocator(
8299  VmaAllocator allocator)
8300 {
8301  if(allocator != VK_NULL_HANDLE)
8302  {
8303  VMA_DEBUG_LOG("vmaDestroyAllocator");
8304  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
8305  vma_delete(&allocationCallbacks, allocator);
8306  }
8307 }
8308 
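// A minimal usage sketch (assumes valid `physicalDevice` and `device` handles):
//
//   VmaAllocatorCreateInfo allocatorInfo = {};
//   allocatorInfo.physicalDevice = physicalDevice;
//   allocatorInfo.device = device;
//   VmaAllocator allocator = VK_NULL_HANDLE;
//   VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
//   // ... use the allocator ...
//   vmaDestroyAllocator(allocator);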
8309 void vmaGetPhysicalDeviceProperties(
8310  VmaAllocator allocator,
8311  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
8312 {
8313  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
8314  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
8315 }
8316 
8317 void vmaGetMemoryProperties(
8318  VmaAllocator allocator,
8319  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
8320 {
8321  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
8322  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
8323 }
8324 
8325 void vmaGetMemoryTypeProperties(
8326  VmaAllocator allocator,
8327  uint32_t memoryTypeIndex,
8328  VkMemoryPropertyFlags* pFlags)
8329 {
8330  VMA_ASSERT(allocator && pFlags);
8331  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
8332  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
8333 }
8334 
8335 void vmaSetCurrentFrameIndex(
8336  VmaAllocator allocator,
8337  uint32_t frameIndex)
8338 {
8339  VMA_ASSERT(allocator);
8340  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
8341 
8342  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8343 
8344  allocator->SetCurrentFrameIndex(frameIndex);
8345 }
8346 
8347 void vmaCalculateStats(
8348  VmaAllocator allocator,
8349  VmaStats* pStats)
8350 {
8351  VMA_ASSERT(allocator && pStats);
8352  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8353  allocator->CalculateStats(pStats);
8354 }
8355 
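// Sketch of querying global statistics (usedBytes is a VmaStatInfo member):
//
//   VmaStats stats;
//   vmaCalculateStats(allocator, &stats);
//   printf("Used bytes in total: %llu\n", (unsigned long long)stats.total.usedBytes);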
8356 #if VMA_STATS_STRING_ENABLED
8357 
8358 void vmaBuildStatsString(
8359  VmaAllocator allocator,
8360  char** ppStatsString,
8361  VkBool32 detailedMap)
8362 {
8363  VMA_ASSERT(allocator && ppStatsString);
8364  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8365 
8366  VmaStringBuilder sb(allocator);
8367  {
8368  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
8369  json.BeginObject();
8370 
8371  VmaStats stats;
8372  allocator->CalculateStats(&stats);
8373 
8374  json.WriteString("Total");
8375  VmaPrintStatInfo(json, stats.total);
8376 
8377  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
8378  {
8379  json.BeginString("Heap ");
8380  json.ContinueString(heapIndex);
8381  json.EndString();
8382  json.BeginObject();
8383 
8384  json.WriteString("Size");
8385  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
8386 
8387  json.WriteString("Flags");
8388  json.BeginArray(true);
8389  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
8390  {
8391  json.WriteString("DEVICE_LOCAL");
8392  }
8393  json.EndArray();
8394 
8395  if(stats.memoryHeap[heapIndex].blockCount > 0)
8396  {
8397  json.WriteString("Stats");
8398  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
8399  }
8400 
8401  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
8402  {
8403  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
8404  {
8405  json.BeginString("Type ");
8406  json.ContinueString(typeIndex);
8407  json.EndString();
8408 
8409  json.BeginObject();
8410 
8411  json.WriteString("Flags");
8412  json.BeginArray(true);
8413  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
8414  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
8415  {
8416  json.WriteString("DEVICE_LOCAL");
8417  }
8418  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
8419  {
8420  json.WriteString("HOST_VISIBLE");
8421  }
8422  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
8423  {
8424  json.WriteString("HOST_COHERENT");
8425  }
8426  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
8427  {
8428  json.WriteString("HOST_CACHED");
8429  }
8430  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
8431  {
8432  json.WriteString("LAZILY_ALLOCATED");
8433  }
8434  json.EndArray();
8435 
8436  if(stats.memoryType[typeIndex].blockCount > 0)
8437  {
8438  json.WriteString("Stats");
8439  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
8440  }
8441 
8442  json.EndObject();
8443  }
8444  }
8445 
8446  json.EndObject();
8447  }
8448  if(detailedMap == VK_TRUE)
8449  {
8450  allocator->PrintDetailedMap(json);
8451  }
8452 
8453  json.EndObject();
8454  }
8455 
8456  const size_t len = sb.GetLength();
8457  char* const pChars = vma_new_array(allocator, char, len + 1);
8458  if(len > 0)
8459  {
8460  memcpy(pChars, sb.GetData(), len);
8461  }
8462  pChars[len] = '\0';
8463  *ppStatsString = pChars;
8464 }
8465 
8466 void vmaFreeStatsString(
8467  VmaAllocator allocator,
8468  char* pStatsString)
8469 {
8470  if(pStatsString != VMA_NULL)
8471  {
8472  VMA_ASSERT(allocator);
8473  size_t len = strlen(pStatsString);
8474  vma_delete_array(allocator, pStatsString, len + 1);
8475  }
8476 }
8477 
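// Usage sketch, valid only when VMA_STATS_STRING_ENABLED is 1:
//
//   char* statsString = nullptr;
//   vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = include detailed map
//   printf("%s\n", statsString);
//   vmaFreeStatsString(allocator, statsString);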
8478 #endif // #if VMA_STATS_STRING_ENABLED
8479 
8480 /*
8481 This function is not protected by any mutex because it just reads immutable data.
8482 */
8483 VkResult vmaFindMemoryTypeIndex(
8484  VmaAllocator allocator,
8485  uint32_t memoryTypeBits,
8486  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8487  uint32_t* pMemoryTypeIndex)
8488 {
8489  VMA_ASSERT(allocator != VK_NULL_HANDLE);
8490  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
8491  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
8492 
8493  if(pAllocationCreateInfo->memoryTypeBits != 0)
8494  {
8495  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
8496  }
8497 
8498  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
8499  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
8500 
8501  // Convert usage to requiredFlags and preferredFlags.
8502  switch(pAllocationCreateInfo->usage)
8503  {
8503  {
8504  case VMA_MEMORY_USAGE_UNKNOWN:
8505  break;
8506  case VMA_MEMORY_USAGE_GPU_ONLY:
8507  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
8508  break;
8509  case VMA_MEMORY_USAGE_CPU_ONLY:
8510  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
8511  break;
8512  case VMA_MEMORY_USAGE_CPU_TO_GPU:
8513  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
8514  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
8515  break;
8516  case VMA_MEMORY_USAGE_GPU_TO_CPU:
8517  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
8518  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
8519  break;
8520  default:
8521  break;
8522  }
8523 
8524  *pMemoryTypeIndex = UINT32_MAX;
8525  uint32_t minCost = UINT32_MAX;
8526  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
8527  memTypeIndex < allocator->GetMemoryTypeCount();
8528  ++memTypeIndex, memTypeBit <<= 1)
8529  {
8530  // This memory type is acceptable according to memoryTypeBits bitmask.
8531  if((memTypeBit & memoryTypeBits) != 0)
8532  {
8533  const VkMemoryPropertyFlags currFlags =
8534  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
8535  // This memory type contains requiredFlags.
8536  if((requiredFlags & ~currFlags) == 0)
8537  {
8538  // Calculate cost as number of bits from preferredFlags not present in this memory type.
8539  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
8540  // Remember memory type with lowest cost.
8541  if(currCost < minCost)
8542  {
8543  *pMemoryTypeIndex = memTypeIndex;
8544  if(currCost == 0)
8545  {
8546  return VK_SUCCESS;
8547  }
8548  minCost = currCost;
8549  }
8550  }
8551  }
8552  }
8553  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
8554 }
8555 
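// Example query (a sketch): find a memory type for host-visible staging memory.
// UINT32_MAX as memoryTypeBits allows every type; normally the mask comes from
// vkGetBufferMemoryRequirements or vkGetImageMemoryRequirements.
//
//   VmaAllocationCreateInfo allocCreateInfo = {};
//   allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
//   uint32_t memTypeIndex = UINT32_MAX;
//   VkResult res = vmaFindMemoryTypeIndex(allocator, UINT32_MAX, &allocCreateInfo, &memTypeIndex);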
8556 VkResult vmaFindMemoryTypeIndexForBufferInfo(
8557  VmaAllocator allocator,
8558  const VkBufferCreateInfo* pBufferCreateInfo,
8559  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8560  uint32_t* pMemoryTypeIndex)
8561 {
8562  VMA_ASSERT(allocator != VK_NULL_HANDLE);
8563  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
8564  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
8565  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
8566 
8567  const VkDevice hDev = allocator->m_hDevice;
8568  VkBuffer hBuffer = VK_NULL_HANDLE;
8569  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
8570  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
8571  if(res == VK_SUCCESS)
8572  {
8573  VkMemoryRequirements memReq = {};
8574  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
8575  hDev, hBuffer, &memReq);
8576 
8577  res = vmaFindMemoryTypeIndex(
8578  allocator,
8579  memReq.memoryTypeBits,
8580  pAllocationCreateInfo,
8581  pMemoryTypeIndex);
8582 
8583  allocator->GetVulkanFunctions().vkDestroyBuffer(
8584  hDev, hBuffer, allocator->GetAllocationCallbacks());
8585  }
8586  return res;
8587 }
8588 
8589 VkResult vmaFindMemoryTypeIndexForImageInfo(
8590  VmaAllocator allocator,
8591  const VkImageCreateInfo* pImageCreateInfo,
8592  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8593  uint32_t* pMemoryTypeIndex)
8594 {
8595  VMA_ASSERT(allocator != VK_NULL_HANDLE);
8596  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
8597  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
8598  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
8599 
8600  const VkDevice hDev = allocator->m_hDevice;
8601  VkImage hImage = VK_NULL_HANDLE;
8602  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
8603  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
8604  if(res == VK_SUCCESS)
8605  {
8606  VkMemoryRequirements memReq = {};
8607  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
8608  hDev, hImage, &memReq);
8609 
8610  res = vmaFindMemoryTypeIndex(
8611  allocator,
8612  memReq.memoryTypeBits,
8613  pAllocationCreateInfo,
8614  pMemoryTypeIndex);
8615 
8616  allocator->GetVulkanFunctions().vkDestroyImage(
8617  hDev, hImage, allocator->GetAllocationCallbacks());
8618  }
8619  return res;
8620 }
8621 
8622 VkResult vmaCreatePool(
8623  VmaAllocator allocator,
8624  const VmaPoolCreateInfo* pCreateInfo,
8625  VmaPool* pPool)
8626 {
8627  VMA_ASSERT(allocator && pCreateInfo && pPool);
8628 
8629  VMA_DEBUG_LOG("vmaCreatePool");
8630 
8631  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8632 
8633  return allocator->CreatePool(pCreateInfo, pPool);
8634 }
8635 
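// Sketch of creating a custom pool and routing allocations to it
// (assumes `memTypeIndex` was obtained e.g. via vmaFindMemoryTypeIndex):
//
//   VmaPoolCreateInfo poolCreateInfo = {};
//   poolCreateInfo.memoryTypeIndex = memTypeIndex;
//   VmaPool pool = VK_NULL_HANDLE;
//   VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
//   VmaAllocationCreateInfo allocCreateInfo = {};
//   allocCreateInfo.pool = pool; // subsequent allocations come from this pool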
8636 void vmaDestroyPool(
8637  VmaAllocator allocator,
8638  VmaPool pool)
8639 {
8640  VMA_ASSERT(allocator);
8641 
8642  if(pool == VK_NULL_HANDLE)
8643  {
8644  return;
8645  }
8646 
8647  VMA_DEBUG_LOG("vmaDestroyPool");
8648 
8649  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8650 
8651  allocator->DestroyPool(pool);
8652 }
8653 
8654 void vmaGetPoolStats(
8655  VmaAllocator allocator,
8656  VmaPool pool,
8657  VmaPoolStats* pPoolStats)
8658 {
8659  VMA_ASSERT(allocator && pool && pPoolStats);
8660 
8661  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8662 
8663  allocator->GetPoolStats(pool, pPoolStats);
8664 }
8665 
8666 void vmaMakePoolAllocationsLost(
8667  VmaAllocator allocator,
8668  VmaPool pool,
8669  size_t* pLostAllocationCount)
8670 {
8671  VMA_ASSERT(allocator && pool);
8672 
8673  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8674 
8675  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
8676 }
8677 
8678 VkResult vmaAllocateMemory(
8679  VmaAllocator allocator,
8680  const VkMemoryRequirements* pVkMemoryRequirements,
8681  const VmaAllocationCreateInfo* pCreateInfo,
8682  VmaAllocation* pAllocation,
8683  VmaAllocationInfo* pAllocationInfo)
8684 {
8685  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
8686 
8687  VMA_DEBUG_LOG("vmaAllocateMemory");
8688 
8689  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8690 
8691  VkResult result = allocator->AllocateMemory(
8692  *pVkMemoryRequirements,
8693  false, // requiresDedicatedAllocation
8694  false, // prefersDedicatedAllocation
8695  VK_NULL_HANDLE, // dedicatedBuffer
8696  VK_NULL_HANDLE, // dedicatedImage
8697  *pCreateInfo,
8698  VMA_SUBALLOCATION_TYPE_UNKNOWN,
8699  pAllocation);
8700 
8701  if(pAllocationInfo && result == VK_SUCCESS)
8702  {
8703  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8704  }
8705 
8706  return result;
8707 }
8708 
8709 VkResult vmaAllocateMemoryForBuffer(
8710  VmaAllocator allocator,
8711  VkBuffer buffer,
8712  const VmaAllocationCreateInfo* pCreateInfo,
8713  VmaAllocation* pAllocation,
8714  VmaAllocationInfo* pAllocationInfo)
8715 {
8716  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
8717 
8718  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
8719 
8720  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8721 
8722  VkMemoryRequirements vkMemReq = {};
8723  bool requiresDedicatedAllocation = false;
8724  bool prefersDedicatedAllocation = false;
8725  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
8726  requiresDedicatedAllocation,
8727  prefersDedicatedAllocation);
8728 
8729  VkResult result = allocator->AllocateMemory(
8730  vkMemReq,
8731  requiresDedicatedAllocation,
8732  prefersDedicatedAllocation,
8733  buffer, // dedicatedBuffer
8734  VK_NULL_HANDLE, // dedicatedImage
8735  *pCreateInfo,
8736  VMA_SUBALLOCATION_TYPE_BUFFER,
8737  pAllocation);
8738 
8739  if(pAllocationInfo && result == VK_SUCCESS)
8740  {
8741  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8742  }
8743 
8744  return result;
8745 }
8746 
8747 VkResult vmaAllocateMemoryForImage(
8748  VmaAllocator allocator,
8749  VkImage image,
8750  const VmaAllocationCreateInfo* pCreateInfo,
8751  VmaAllocation* pAllocation,
8752  VmaAllocationInfo* pAllocationInfo)
8753 {
8754  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
8755 
8756  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
8757 
8758  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8759 
8760  VkResult result = AllocateMemoryForImage(
8761  allocator,
8762  image,
8763  pCreateInfo,
8764  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
8765  pAllocation);
8766 
8767  if(pAllocationInfo && result == VK_SUCCESS)
8768  {
8769  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8770  }
8771 
8772  return result;
8773 }
8774 
8775 void vmaFreeMemory(
8776  VmaAllocator allocator,
8777  VmaAllocation allocation)
8778 {
8779  VMA_ASSERT(allocator && allocation);
8780 
8781  VMA_DEBUG_LOG("vmaFreeMemory");
8782 
8783  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8784 
8785  allocator->FreeMemory(allocation);
8786 }
8787 
8788 void vmaGetAllocationInfo(
8789  VmaAllocator allocator,
8790  VmaAllocation allocation,
8791  VmaAllocationInfo* pAllocationInfo)
8792 {
8793  VMA_ASSERT(allocator && allocation && pAllocationInfo);
8794 
8795  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8796 
8797  allocator->GetAllocationInfo(allocation, pAllocationInfo);
8798 }
8799 
8800 VkBool32 vmaTouchAllocation(
8801  VmaAllocator allocator,
8802  VmaAllocation allocation)
8803 {
8804  VMA_ASSERT(allocator && allocation);
8805 
8806  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8807 
8808  return allocator->TouchAllocation(allocation);
8809 }
8810 
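// Sketch of the lost-allocation pattern these two functions support: advance
// the frame index once per frame, then touch can-become-lost allocations
// before use to test whether they are still alive and mark them as used.
//
//   vmaSetCurrentFrameIndex(allocator, frameIndex); // frameIndex is assumed
//   if(vmaTouchAllocation(allocator, allocation) == VK_FALSE)
//   {
//       // Allocation became lost; its memory was reclaimed. Re-create the resource.
//   }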
8811 void vmaSetAllocationUserData(
8812  VmaAllocator allocator,
8813  VmaAllocation allocation,
8814  void* pUserData)
8815 {
8816  VMA_ASSERT(allocator && allocation);
8817 
8818  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8819 
8820  allocation->SetUserData(allocator, pUserData);
8821 }
8822 
8823 void vmaCreateLostAllocation(
8824  VmaAllocator allocator,
8825  VmaAllocation* pAllocation)
8826 {
8827  VMA_ASSERT(allocator && pAllocation);
8828 
8829  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
8830 
8831  allocator->CreateLostAllocation(pAllocation);
8832 }
8833 
8834 VkResult vmaMapMemory(
8835  VmaAllocator allocator,
8836  VmaAllocation allocation,
8837  void** ppData)
8838 {
8839  VMA_ASSERT(allocator && allocation && ppData);
8840 
8841  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8842 
8843  return allocator->Map(allocation, ppData);
8844 }
8845 
8846 void vmaUnmapMemory(
8847  VmaAllocator allocator,
8848  VmaAllocation allocation)
8849 {
8850  VMA_ASSERT(allocator && allocation);
8851 
8852  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8853 
8854  allocator->Unmap(allocation);
8855 }
8856 
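// Typical map/write/unmap pattern (sketch; the allocation must come from a
// HOST_VISIBLE memory type, e.g. created with VMA_MEMORY_USAGE_CPU_ONLY;
// `srcData` and `srcDataSize` are assumed):
//
//   void* pData = nullptr;
//   if(vmaMapMemory(allocator, allocation, &pData) == VK_SUCCESS)
//   {
//       memcpy(pData, srcData, srcDataSize);
//       vmaUnmapMemory(allocator, allocation);
//   }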
8857 VkResult vmaDefragment(
8858  VmaAllocator allocator,
8859  VmaAllocation* pAllocations,
8860  size_t allocationCount,
8861  VkBool32* pAllocationsChanged,
8862  const VmaDefragmentationInfo *pDefragmentationInfo,
8863  VmaDefragmentationStats* pDefragmentationStats)
8864 {
8865  VMA_ASSERT(allocator && pAllocations);
8866 
8867  VMA_DEBUG_LOG("vmaDefragment");
8868 
8869  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8870 
8871  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
8872 }
8873 
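// Defragmentation sketch (assumes `allocations` is an array of `allocCount`
// VmaAllocation handles living in HOST_VISIBLE memory; both names are hypothetical):
//
//   std::vector<VkBool32> allocationsChanged(allocCount, VK_FALSE);
//   VmaDefragmentationStats stats = {};
//   VkResult res = vmaDefragment(allocator, allocations, allocCount,
//       allocationsChanged.data(), nullptr, &stats);
//   // Any allocation with allocationsChanged[i] == VK_TRUE was moved; the buffer
//   // or image bound to it typically has to be re-created and re-bound.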
8874 VkResult vmaCreateBuffer(
8875  VmaAllocator allocator,
8876  const VkBufferCreateInfo* pBufferCreateInfo,
8877  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8878  VkBuffer* pBuffer,
8879  VmaAllocation* pAllocation,
8880  VmaAllocationInfo* pAllocationInfo)
8881 {
8882  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
8883 
8884  VMA_DEBUG_LOG("vmaCreateBuffer");
8885 
8886  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8887 
8888  *pBuffer = VK_NULL_HANDLE;
8889  *pAllocation = VK_NULL_HANDLE;
8890 
8891  // 1. Create VkBuffer.
8892  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
8893  allocator->m_hDevice,
8894  pBufferCreateInfo,
8895  allocator->GetAllocationCallbacks(),
8896  pBuffer);
8897  if(res >= 0)
8898  {
8899  // 2. vkGetBufferMemoryRequirements.
8900  VkMemoryRequirements vkMemReq = {};
8901  bool requiresDedicatedAllocation = false;
8902  bool prefersDedicatedAllocation = false;
8903  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
8904  requiresDedicatedAllocation, prefersDedicatedAllocation);
8905 
8906  // Make sure alignment requirements for specific buffer usages reported
8907  // in Physical Device Properties are included in alignment reported by memory requirements.
8908  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
8909  {
8910  VMA_ASSERT(vkMemReq.alignment %
8911  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
8912  }
8913  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
8914  {
8915  VMA_ASSERT(vkMemReq.alignment %
8916  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
8917  }
8918  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
8919  {
8920  VMA_ASSERT(vkMemReq.alignment %
8921  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
8922  }
8923 
8924  // 3. Allocate memory using allocator.
8925  res = allocator->AllocateMemory(
8926  vkMemReq,
8927  requiresDedicatedAllocation,
8928  prefersDedicatedAllocation,
8929  *pBuffer, // dedicatedBuffer
8930  VK_NULL_HANDLE, // dedicatedImage
8931  *pAllocationCreateInfo,
8932  VMA_SUBALLOCATION_TYPE_BUFFER,
8933  pAllocation);
8934  if(res >= 0)
8935  {
8936  // 4. Bind buffer with memory.
8937  res = (*allocator->GetVulkanFunctions().vkBindBufferMemory)(
8938  allocator->m_hDevice,
8939  *pBuffer,
8940  (*pAllocation)->GetMemory(),
8941  (*pAllocation)->GetOffset());
8942  if(res >= 0)
8943  {
8944  // All steps succeeded.
8945  if(pAllocationInfo != VMA_NULL)
8946  {
8947  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8948  }
8949  return VK_SUCCESS;
8950  }
8951  allocator->FreeMemory(*pAllocation);
8952  *pAllocation = VK_NULL_HANDLE;
8953  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
8954  *pBuffer = VK_NULL_HANDLE;
8955  return res;
8956  }
8957  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
8958  *pBuffer = VK_NULL_HANDLE;
8959  return res;
8960  }
8961  return res;
8962 }
8963 
8964 void vmaDestroyBuffer(
8965  VmaAllocator allocator,
8966  VkBuffer buffer,
8967  VmaAllocation allocation)
8968 {
8969  if(buffer != VK_NULL_HANDLE)
8970  {
8971  VMA_ASSERT(allocator);
8972 
8973  VMA_DEBUG_LOG("vmaDestroyBuffer");
8974 
8975  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8976 
8977  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
8978 
8979  allocator->FreeMemory(allocation);
8980  }
8981 }
8982 
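// Sketch: create a device-local vertex buffer and free it with the matching
// destroy call. The size and usage flags below are hypothetical.
//
//   VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
//   bufCreateInfo.size = 65536;
//   bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
//   VmaAllocationCreateInfo allocCreateInfo = {};
//   allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
//   VkBuffer buffer = VK_NULL_HANDLE;
//   VmaAllocation allocation = VK_NULL_HANDLE;
//   VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
//       &buffer, &allocation, nullptr);
//   // ... use the buffer ...
//   vmaDestroyBuffer(allocator, buffer, allocation);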
8983 VkResult vmaCreateImage(
8984  VmaAllocator allocator,
8985  const VkImageCreateInfo* pImageCreateInfo,
8986  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8987  VkImage* pImage,
8988  VmaAllocation* pAllocation,
8989  VmaAllocationInfo* pAllocationInfo)
8990 {
8991  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
8992 
8993  VMA_DEBUG_LOG("vmaCreateImage");
8994 
8995  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8996 
8997  *pImage = VK_NULL_HANDLE;
8998  *pAllocation = VK_NULL_HANDLE;
8999 
9000  // 1. Create VkImage.
9001  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
9002  allocator->m_hDevice,
9003  pImageCreateInfo,
9004  allocator->GetAllocationCallbacks(),
9005  pImage);
9006  if(res >= 0)
9007  {
9008  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
9009  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
9010  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
9011 
9012  // 2. Allocate memory using allocator.
9013  res = AllocateMemoryForImage(allocator, *pImage, pAllocationCreateInfo, suballocType, pAllocation);
9014  if(res >= 0)
9015  {
9016  // 3. Bind image with memory.
9017  res = (*allocator->GetVulkanFunctions().vkBindImageMemory)(
9018  allocator->m_hDevice,
9019  *pImage,
9020  (*pAllocation)->GetMemory(),
9021  (*pAllocation)->GetOffset());
9022  if(res >= 0)
9023  {
9024  // All steps succeeded.
9025  if(pAllocationInfo != VMA_NULL)
9026  {
9027  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
9028  }
9029  return VK_SUCCESS;
9030  }
9031  allocator->FreeMemory(*pAllocation);
9032  *pAllocation = VK_NULL_HANDLE;
9033  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
9034  *pImage = VK_NULL_HANDLE;
9035  return res;
9036  }
9037  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
9038  *pImage = VK_NULL_HANDLE;
9039  return res;
9040  }
9041  return res;
9042 }
9043 
9044 void vmaDestroyImage(
9045  VmaAllocator allocator,
9046  VkImage image,
9047  VmaAllocation allocation)
9048 {
9049  if(image != VK_NULL_HANDLE)
9050  {
9051  VMA_ASSERT(allocator);
9052 
9053  VMA_DEBUG_LOG("vmaDestroyImage");
9054 
9055  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9056 
9057  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
9058 
9059  allocator->FreeMemory(allocation);
9060  }
9061 }
9062 
9063 #endif // #ifdef VMA_IMPLEMENTATION
+Go to the documentation of this file.
1 //
2 // Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29 
935 #include <vulkan/vulkan.h>
936 
937 VK_DEFINE_HANDLE(VmaAllocator)
938 
939 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
941  VmaAllocator allocator,
942  uint32_t memoryType,
943  VkDeviceMemory memory,
944  VkDeviceSize size);
946 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
947  VmaAllocator allocator,
948  uint32_t memoryType,
949  VkDeviceMemory memory,
950  VkDeviceSize size);
951 
959 typedef struct VmaDeviceMemoryCallbacks {
965 
995 
998 typedef VkFlags VmaAllocatorCreateFlags;
999 
1004 typedef struct VmaVulkanFunctions {
1005  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
1006  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
1007  PFN_vkAllocateMemory vkAllocateMemory;
1008  PFN_vkFreeMemory vkFreeMemory;
1009  PFN_vkMapMemory vkMapMemory;
1010  PFN_vkUnmapMemory vkUnmapMemory;
1011  PFN_vkBindBufferMemory vkBindBufferMemory;
1012  PFN_vkBindImageMemory vkBindImageMemory;
1013  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
1014  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
1015  PFN_vkCreateBuffer vkCreateBuffer;
1016  PFN_vkDestroyBuffer vkDestroyBuffer;
1017  PFN_vkCreateImage vkCreateImage;
1018  PFN_vkDestroyImage vkDestroyImage;
1019  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
1020  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
1022 
1025 {
1027  VmaAllocatorCreateFlags flags;
1029 
1030  VkPhysicalDevice physicalDevice;
1032 
1033  VkDevice device;
1035 
1038 
1039  const VkAllocationCallbacks* pAllocationCallbacks;
1041 
1080  const VkDeviceSize* pHeapSizeLimit;
1094 
1096 VkResult vmaCreateAllocator(
1097  const VmaAllocatorCreateInfo* pCreateInfo,
1098  VmaAllocator* pAllocator);
1099 
1101 void vmaDestroyAllocator(
1102  VmaAllocator allocator);
1103 
1109  VmaAllocator allocator,
1110  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
1111 
1117  VmaAllocator allocator,
1118  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
1119 
1127  VmaAllocator allocator,
1128  uint32_t memoryTypeIndex,
1129  VkMemoryPropertyFlags* pFlags);
1130 
1140  VmaAllocator allocator,
1141  uint32_t frameIndex);
1142 
1145 typedef struct VmaStatInfo
1146 {
1148  uint32_t blockCount;
1154  VkDeviceSize usedBytes;
1156  VkDeviceSize unusedBytes;
1157  VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
1158  VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
1159 } VmaStatInfo;
1160 
1162 typedef struct VmaStats
1163 {
1164  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
1165  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
1167 } VmaStats;
1168 
1170 void vmaCalculateStats(
1171  VmaAllocator allocator,
1172  VmaStats* pStats);
1173 
1174 #define VMA_STATS_STRING_ENABLED 1
1175 
1176 #if VMA_STATS_STRING_ENABLED
1177 
1179 
1181 void vmaBuildStatsString(
1182  VmaAllocator allocator,
1183  char** ppStatsString,
1184  VkBool32 detailedMap);
1185 
1186 void vmaFreeStatsString(
1187  VmaAllocator allocator,
1188  char* pStatsString);
1189 
1190 #endif // #if VMA_STATS_STRING_ENABLED
1191 
1192 VK_DEFINE_HANDLE(VmaPool)
1193 
1194 typedef enum VmaMemoryUsage
1195 {
1244 } VmaMemoryUsage;
1245 
1260 
1310 
1314 
1316 {
1318  VmaAllocationCreateFlags flags;
1329  VkMemoryPropertyFlags requiredFlags;
1334  VkMemoryPropertyFlags preferredFlags;
1342  uint32_t memoryTypeBits;
1348  VmaPool pool;
1355  void* pUserData;
1357 
1374 VkResult vmaFindMemoryTypeIndex(
1375  VmaAllocator allocator,
1376  uint32_t memoryTypeBits,
1377  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1378  uint32_t* pMemoryTypeIndex);
1379 
1393  VmaAllocator allocator,
1394  const VkBufferCreateInfo* pBufferCreateInfo,
1395  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1396  uint32_t* pMemoryTypeIndex);
1397 
1411  VmaAllocator allocator,
1412  const VkImageCreateInfo* pImageCreateInfo,
1413  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1414  uint32_t* pMemoryTypeIndex);
1415 
1436 
1439 typedef VkFlags VmaPoolCreateFlags;
1440 
1443 typedef struct VmaPoolCreateInfo {
1449  VmaPoolCreateFlags flags;
1454  VkDeviceSize blockSize;
1483 
1486 typedef struct VmaPoolStats {
1489  VkDeviceSize size;
1492  VkDeviceSize unusedSize;
1505  VkDeviceSize unusedRangeSizeMax;
1506 } VmaPoolStats;
1507 
1514 VkResult vmaCreatePool(
1515  VmaAllocator allocator,
1516  const VmaPoolCreateInfo* pCreateInfo,
1517  VmaPool* pPool);
1518 
1521 void vmaDestroyPool(
1522  VmaAllocator allocator,
1523  VmaPool pool);
1524 
1531 void vmaGetPoolStats(
1532  VmaAllocator allocator,
1533  VmaPool pool,
1534  VmaPoolStats* pPoolStats);
1535 
1543  VmaAllocator allocator,
1544  VmaPool pool,
1545  size_t* pLostAllocationCount);
1546 
1547 VK_DEFINE_HANDLE(VmaAllocation)
1548 
1549 
1551 typedef struct VmaAllocationInfo {
1556  uint32_t memoryType;
1565  VkDeviceMemory deviceMemory;
1570  VkDeviceSize offset;
1575  VkDeviceSize size;
1589  void* pUserData;
1591 
1602 VkResult vmaAllocateMemory(
1603  VmaAllocator allocator,
1604  const VkMemoryRequirements* pVkMemoryRequirements,
1605  const VmaAllocationCreateInfo* pCreateInfo,
1606  VmaAllocation* pAllocation,
1607  VmaAllocationInfo* pAllocationInfo);
1608 
1616  VmaAllocator allocator,
1617  VkBuffer buffer,
1618  const VmaAllocationCreateInfo* pCreateInfo,
1619  VmaAllocation* pAllocation,
1620  VmaAllocationInfo* pAllocationInfo);
1621 
1623 VkResult vmaAllocateMemoryForImage(
1624  VmaAllocator allocator,
1625  VkImage image,
1626  const VmaAllocationCreateInfo* pCreateInfo,
1627  VmaAllocation* pAllocation,
1628  VmaAllocationInfo* pAllocationInfo);
1629 
1631 void vmaFreeMemory(
1632  VmaAllocator allocator,
1633  VmaAllocation allocation);
1634 
1652  VmaAllocator allocator,
1653  VmaAllocation allocation,
1654  VmaAllocationInfo* pAllocationInfo);
1655 
1670 VkBool32 vmaTouchAllocation(
1671  VmaAllocator allocator,
1672  VmaAllocation allocation);
1673 
1688  VmaAllocator allocator,
1689  VmaAllocation allocation,
1690  void* pUserData);
1691 
1703  VmaAllocator allocator,
1704  VmaAllocation* pAllocation);
1705 
1740 VkResult vmaMapMemory(
1741  VmaAllocator allocator,
1742  VmaAllocation allocation,
1743  void** ppData);
1744 
1749 void vmaUnmapMemory(
1750  VmaAllocator allocator,
1751  VmaAllocation allocation);
1752 
1754 typedef struct VmaDefragmentationInfo {
1759  VkDeviceSize maxBytesToMove;
1766 
1768 typedef struct VmaDefragmentationStats {
1770  VkDeviceSize bytesMoved;
1772  VkDeviceSize bytesFreed;
1778 
1861 VkResult vmaDefragment(
1862  VmaAllocator allocator,
1863  VmaAllocation* pAllocations,
1864  size_t allocationCount,
1865  VkBool32* pAllocationsChanged,
1866  const VmaDefragmentationInfo *pDefragmentationInfo,
1867  VmaDefragmentationStats* pDefragmentationStats);
1868 
1895 VkResult vmaCreateBuffer(
1896  VmaAllocator allocator,
1897  const VkBufferCreateInfo* pBufferCreateInfo,
1898  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1899  VkBuffer* pBuffer,
1900  VmaAllocation* pAllocation,
1901  VmaAllocationInfo* pAllocationInfo);
1902 
1914 void vmaDestroyBuffer(
1915  VmaAllocator allocator,
1916  VkBuffer buffer,
1917  VmaAllocation allocation);
1918 
1920 VkResult vmaCreateImage(
1921  VmaAllocator allocator,
1922  const VkImageCreateInfo* pImageCreateInfo,
1923  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1924  VkImage* pImage,
1925  VmaAllocation* pAllocation,
1926  VmaAllocationInfo* pAllocationInfo);
1927 
1939 void vmaDestroyImage(
1940  VmaAllocator allocator,
1941  VkImage image,
1942  VmaAllocation allocation);
1943 
1944 #ifdef __cplusplus
1945 }
1946 #endif
1947 
1948 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
1949 
1950 // For Visual Studio IntelliSense.
1951 #ifdef __INTELLISENSE__
1952 #define VMA_IMPLEMENTATION
1953 #endif
1954 
1955 #ifdef VMA_IMPLEMENTATION
1956 #undef VMA_IMPLEMENTATION
1957 
1958 #include <cstdint>
1959 #include <cstdlib>
1960 #include <cstring>
1961 
1962 /*******************************************************************************
1963 CONFIGURATION SECTION
1964 
1965 Define some of these macros before each #include of this header or change them
1966 here if you need behavior other than the default, depending on your environment.
1967 */
1968 
1969 /*
1970 Define this macro to 1 to make the library fetch pointers to Vulkan functions
1971 internally, like:
1972 
1973  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
1974 
1975 Define to 0 if you are going to provide your own pointers to Vulkan functions via
1976 VmaAllocatorCreateInfo::pVulkanFunctions.
1977 */
1978 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
1979 #define VMA_STATIC_VULKAN_FUNCTIONS 1
1980 #endif
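/*
Editorial example (not part of the original header): a minimal sketch of
supplying your own function pointers instead of the static ones, e.g. fetched
with vkGetDeviceProcAddr(). The `device` and `physicalDevice` handles are
assumed to already exist.

    #define VMA_STATIC_VULKAN_FUNCTIONS 0
    #include "vk_mem_alloc.h"

    VmaVulkanFunctions vulkanFunctions = {};
    vulkanFunctions.vkAllocateMemory =
        (PFN_vkAllocateMemory)vkGetDeviceProcAddr(device, "vkAllocateMemory");
    // ...fill the remaining members of VmaVulkanFunctions the same way...

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;
    allocatorInfo.pVulkanFunctions = &vulkanFunctions;

    VmaAllocator allocator;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
*/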
1981 
1982 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
1983 //#define VMA_USE_STL_CONTAINERS 1
1984 
1985 /* Set this macro to 1 to make the library include and use STL containers:
1986 std::pair, std::vector, std::list, std::unordered_map.
1987 
1988 Set it to 0 or leave it undefined to make the library use its own implementation of
1989 the containers.
1990 */
1991 #if VMA_USE_STL_CONTAINERS
1992  #define VMA_USE_STL_VECTOR 1
1993  #define VMA_USE_STL_UNORDERED_MAP 1
1994  #define VMA_USE_STL_LIST 1
1995 #endif
1996 
1997 #if VMA_USE_STL_VECTOR
1998  #include <vector>
1999 #endif
2000 
2001 #if VMA_USE_STL_UNORDERED_MAP
2002  #include <unordered_map>
2003 #endif
2004 
2005 #if VMA_USE_STL_LIST
2006  #include <list>
2007 #endif
2008 
2009 /*
2010 The following headers are used in this CONFIGURATION section only, so feel free to
2011 remove them if not needed.
2012 */
2013 #include <cassert> // for assert
2014 #include <algorithm> // for min, max
2015 #include <mutex> // for std::mutex
2016 #include <atomic> // for std::atomic
2017 
2018 #if !defined(_WIN32) && !defined(__APPLE__)
2019  #include <malloc.h> // for aligned_alloc()
2020 #endif
2021 
2022 #ifndef VMA_NULL
2023  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
2024  #define VMA_NULL nullptr
2025 #endif
2026 
2027 #if defined(__APPLE__) || defined(__ANDROID__)
2028 #include <cstdlib>
2029 void *aligned_alloc(size_t alignment, size_t size)
2030 {
2031  // alignment must be >= sizeof(void*)
2032  if(alignment < sizeof(void*))
2033  {
2034  alignment = sizeof(void*);
2035  }
2036 
2037  void *pointer;
2038  if(posix_memalign(&pointer, alignment, size) == 0)
2039  return pointer;
2040  return VMA_NULL;
2041 }
2042 #endif
2043 
2044 // Normal assert to check for programmer's errors, especially in Debug configuration.
2045 #ifndef VMA_ASSERT
2046  #ifdef _DEBUG
2047  #define VMA_ASSERT(expr) assert(expr)
2048  #else
2049  #define VMA_ASSERT(expr)
2050  #endif
2051 #endif
2052 
2053 // Assert that will be called very often, like inside data structures, e.g. operator[].
2054 // Making it non-empty can make the program slow.
2055 #ifndef VMA_HEAVY_ASSERT
2056  #ifdef _DEBUG
2057  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
2058  #else
2059  #define VMA_HEAVY_ASSERT(expr)
2060  #endif
2061 #endif
2062 
2063 #ifndef VMA_ALIGN_OF
2064  #define VMA_ALIGN_OF(type) (__alignof(type))
2065 #endif
2066 
2067 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
2068  #if defined(_WIN32)
2069  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
2070  #else
2071  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
2072  #endif
2073 #endif
2074 
2075 #ifndef VMA_SYSTEM_FREE
2076  #if defined(_WIN32)
2077  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
2078  #else
2079  #define VMA_SYSTEM_FREE(ptr) free(ptr)
2080  #endif
2081 #endif
2082 
2083 #ifndef VMA_MIN
2084  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
2085 #endif
2086 
2087 #ifndef VMA_MAX
2088  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
2089 #endif
2090 
2091 #ifndef VMA_SWAP
2092  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
2093 #endif
2094 
2095 #ifndef VMA_SORT
2096  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
2097 #endif
2098 
2099 #ifndef VMA_DEBUG_LOG
2100  #define VMA_DEBUG_LOG(format, ...)
2101  /*
2102  #define VMA_DEBUG_LOG(format, ...) do { \
2103  printf(format, __VA_ARGS__); \
2104  printf("\n"); \
2105  } while(false)
2106  */
2107 #endif
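/*
Editorial example: VMA_DEBUG_LOG can be routed to your own printf-style sink,
following the same pattern as the commented-out default above. `MyEngineLog`
is a hypothetical logging function:

    #define VMA_DEBUG_LOG(format, ...) do { \
        MyEngineLog(format, __VA_ARGS__); \
    } while(false)
*/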
2108 
2109 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
2110 #if VMA_STATS_STRING_ENABLED
2111  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
2112  {
2113  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
2114  }
2115  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
2116  {
2117  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
2118  }
2119  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
2120  {
2121  snprintf(outStr, strLen, "%p", ptr);
2122  }
2123 #endif
2124 
2125 #ifndef VMA_MUTEX
2126  class VmaMutex
2127  {
2128  public:
2129  VmaMutex() { }
2130  ~VmaMutex() { }
2131  void Lock() { m_Mutex.lock(); }
2132  void Unlock() { m_Mutex.unlock(); }
2133  private:
2134  std::mutex m_Mutex;
2135  };
2136  #define VMA_MUTEX VmaMutex
2137 #endif
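/*
Editorial example: any class with Lock()/Unlock() can replace VmaMutex. A
minimal sketch using a spin lock built on std::atomic_flag (the name SpinMutex
is illustrative):

    class SpinMutex
    {
    public:
        void Lock() { while(m_Flag.test_and_set(std::memory_order_acquire)) { } }
        void Unlock() { m_Flag.clear(std::memory_order_release); }
    private:
        std::atomic_flag m_Flag = ATOMIC_FLAG_INIT;
    };
    #define VMA_MUTEX SpinMutex
*/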
2138 
2139 /*
2140 If providing your own implementation, you need to implement a subset of std::atomic:
2141 
2142 - Constructor(uint32_t desired)
2143 - uint32_t load() const
2144 - void store(uint32_t desired)
2145 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
2146 */
2147 #ifndef VMA_ATOMIC_UINT32
2148  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
2149 #endif
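/*
Editorial example: a replacement only needs the subset listed above. A thin
wrapper over std::atomic<uint32_t> (the name MyAtomicU32 is illustrative)
shows the expected interface:

    struct MyAtomicU32
    {
        MyAtomicU32(uint32_t desired) : m_Value(desired) { }
        uint32_t load() const { return m_Value.load(); }
        void store(uint32_t desired) { m_Value.store(desired); }
        bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
        {
            return m_Value.compare_exchange_weak(expected, desired);
        }
    private:
        std::atomic<uint32_t> m_Value;
    };
    #define VMA_ATOMIC_UINT32 MyAtomicU32
*/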
2150 
2151 #ifndef VMA_BEST_FIT
2152 
2164  #define VMA_BEST_FIT (1)
2165 #endif
2166 
2167 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
2168 
2172  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
2173 #endif
2174 
2175 #ifndef VMA_DEBUG_ALIGNMENT
2176 
2180  #define VMA_DEBUG_ALIGNMENT (1)
2181 #endif
2182 
2183 #ifndef VMA_DEBUG_MARGIN
2184 
2188  #define VMA_DEBUG_MARGIN (0)
2189 #endif
2190 
2191 #ifndef VMA_DEBUG_GLOBAL_MUTEX
2192 
2196  #define VMA_DEBUG_GLOBAL_MUTEX (0)
2197 #endif
2198 
2199 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
2200 
2204  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
2205 #endif
2206 
2207 #ifndef VMA_SMALL_HEAP_MAX_SIZE
2208  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
2210 #endif
2211 
2212 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
2213  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
2215 #endif
2216 
2217 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
2218 
2219 /*******************************************************************************
2220 END OF CONFIGURATION
2221 */
2222 
2223 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
2224  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
2225 
2226 // Returns number of bits set to 1 in (v).
2227 static inline uint32_t VmaCountBitsSet(uint32_t v)
2228 {
2229  uint32_t c = v - ((v >> 1) & 0x55555555);
2230  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
2231  c = ((c >> 4) + c) & 0x0F0F0F0F;
2232  c = ((c >> 8) + c) & 0x00FF00FF;
2233  c = ((c >> 16) + c) & 0x0000FFFF;
2234  return c;
2235 }
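// Editorial worked example: v = 0xB (binary 1011) has three bits set, so
// VmaCountBitsSet(0xB) == 3. Each step above folds neighboring bit-counts
// together: 2-bit pairs, then nibbles, bytes, and half-words.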
2236 
2237 // Aligns given value up to the nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
2238 // Use types like uint32_t, uint64_t as T.
2239 template <typename T>
2240 static inline T VmaAlignUp(T val, T align)
2241 {
2242  return (val + align - 1) / align * align;
2243 }
2244 
2245 // Division with mathematical rounding to the nearest integer. For example: VmaRoundDiv(7, 2) = 4.
2246 template <typename T>
2247 inline T VmaRoundDiv(T x, T y)
2248 {
2249  return (x + (y / (T)2)) / y;
2250 }
2251 
2252 #ifndef VMA_SORT
2253 
2254 template<typename Iterator, typename Compare>
2255 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
2256 {
2257  Iterator centerValue = end; --centerValue;
2258  Iterator insertIndex = beg;
2259  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
2260  {
2261  if(cmp(*memTypeIndex, *centerValue))
2262  {
2263  if(insertIndex != memTypeIndex)
2264  {
2265  VMA_SWAP(*memTypeIndex, *insertIndex);
2266  }
2267  ++insertIndex;
2268  }
2269  }
2270  if(insertIndex != centerValue)
2271  {
2272  VMA_SWAP(*insertIndex, *centerValue);
2273  }
2274  return insertIndex;
2275 }
2276 
2277 template<typename Iterator, typename Compare>
2278 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
2279 {
2280  if(beg < end)
2281  {
2282  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
2283  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
2284  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
2285  }
2286 }
2287 
2288 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
2289 
2290 #endif // #ifndef VMA_SORT
2291 
2292 /*
2293 Returns true if two memory blocks occupy overlapping pages.
2294 ResourceA must be at a lower memory offset than ResourceB.
2295 
2296 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
2297 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
2298 */
2299 static inline bool VmaBlocksOnSamePage(
2300  VkDeviceSize resourceAOffset,
2301  VkDeviceSize resourceASize,
2302  VkDeviceSize resourceBOffset,
2303  VkDeviceSize pageSize)
2304 {
2305  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
2306  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
2307  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
2308  VkDeviceSize resourceBStart = resourceBOffset;
2309  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
2310  return resourceAEndPage == resourceBStartPage;
2311 }
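/*
Editorial worked example: with pageSize = 4096, a resource at offset 0 with
size 4000 ends at byte 3999, which lies on page 0. A second resource starting
at offset 4096 starts on page 1, so the function returns false; if it started
at offset 4000 instead, both would touch page 0 and the result would be true.
*/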
2312 
2313 enum VmaSuballocationType
2314 {
2315  VMA_SUBALLOCATION_TYPE_FREE = 0,
2316  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
2317  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
2318  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
2319  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
2320  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
2321  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
2322 };
2323 
2324 /*
2325 Returns true if given suballocation types could conflict and must respect
2326 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
2327 or linear image and the other one is an optimal image. If a type is unknown, the
2328 function behaves conservatively.
2329 */
2330 static inline bool VmaIsBufferImageGranularityConflict(
2331  VmaSuballocationType suballocType1,
2332  VmaSuballocationType suballocType2)
2333 {
2334  if(suballocType1 > suballocType2)
2335  {
2336  VMA_SWAP(suballocType1, suballocType2);
2337  }
2338 
2339  switch(suballocType1)
2340  {
2341  case VMA_SUBALLOCATION_TYPE_FREE:
2342  return false;
2343  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
2344  return true;
2345  case VMA_SUBALLOCATION_TYPE_BUFFER:
2346  return
2347  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
2348  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
2349  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
2350  return
2351  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
2352  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
2353  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
2354  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
2355  return
2356  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
2357  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
2358  return false;
2359  default:
2360  VMA_ASSERT(0);
2361  return true;
2362  }
2363 }
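/*
Editorial worked example: (BUFFER, IMAGE_OPTIMAL) conflicts, so such
neighboring suballocations must be spaced apart to respect
bufferImageGranularity, while (BUFFER, IMAGE_LINEAR) does not conflict. The
initial swap makes the check symmetric: (IMAGE_OPTIMAL, BUFFER) gives the
same answer.
*/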
2364 
2365 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
2366 struct VmaMutexLock
2367 {
2368 public:
2369  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
2370  m_pMutex(useMutex ? &mutex : VMA_NULL)
2371  {
2372  if(m_pMutex)
2373  {
2374  m_pMutex->Lock();
2375  }
2376  }
2377 
2378  ~VmaMutexLock()
2379  {
2380  if(m_pMutex)
2381  {
2382  m_pMutex->Unlock();
2383  }
2384  }
2385 
2386 private:
2387  VMA_MUTEX* m_pMutex;
2388 };
2389 
2390 #if VMA_DEBUG_GLOBAL_MUTEX
2391  static VMA_MUTEX gDebugGlobalMutex;
2392  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
2393 #else
2394  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
2395 #endif
2396 
2397 // Minimum size of a free suballocation to register it in the free suballocation collection.
2398 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
2399 
2400 /*
2401 Performs binary search and returns iterator to the first element that is greater
2402 than or equal to (key), according to comparison (cmp).
2403 
2404 Cmp should return true if its first argument is less than its second argument.
2405 
2406 The returned iterator points to the found element, if present in the collection,
2407 or to the place where a new element with value (key) should be inserted.
2408 */
2409 template <typename IterT, typename KeyT, typename CmpT>
2410 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpT cmp)
2411 {
2412  size_t down = 0, up = (end - beg);
2413  while(down < up)
2414  {
2415  const size_t mid = (down + up) / 2;
2416  if(cmp(*(beg+mid), key))
2417  {
2418  down = mid + 1;
2419  }
2420  else
2421  {
2422  up = mid;
2423  }
2424  }
2425  return beg + down;
2426 }
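/*
Editorial example: the function behaves like std::lower_bound. For the sorted
array {1, 3, 3, 7} and key 3 it returns a pointer to the first 3 (index 1);
for key 4 it returns index 3, where 4 would have to be inserted:

    const int arr[] = { 1, 3, 3, 7 };
    const int* it = VmaBinaryFindFirstNotLess(
        arr, arr + 4, 3, [](int a, int b) { return a < b; });
    // it == arr + 1
*/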
2427 
2429 // Memory allocation
2430 
2431 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
2432 {
2433  if((pAllocationCallbacks != VMA_NULL) &&
2434  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
2435  {
2436  return (*pAllocationCallbacks->pfnAllocation)(
2437  pAllocationCallbacks->pUserData,
2438  size,
2439  alignment,
2440  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2441  }
2442  else
2443  {
2444  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
2445  }
2446 }
2447 
2448 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
2449 {
2450  if((pAllocationCallbacks != VMA_NULL) &&
2451  (pAllocationCallbacks->pfnFree != VMA_NULL))
2452  {
2453  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
2454  }
2455  else
2456  {
2457  VMA_SYSTEM_FREE(ptr);
2458  }
2459 }
2460 
2461 template<typename T>
2462 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
2463 {
2464  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
2465 }
2466 
2467 template<typename T>
2468 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
2469 {
2470  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
2471 }
2472 
2473 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
2474 
2475 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
2476 
2477 template<typename T>
2478 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
2479 {
2480  ptr->~T();
2481  VmaFree(pAllocationCallbacks, ptr);
2482 }
2483 
2484 template<typename T>
2485 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
2486 {
2487  if(ptr != VMA_NULL)
2488  {
2489  for(size_t i = count; i--; )
2490  {
2491  ptr[i].~T();
2492  }
2493  VmaFree(pAllocationCallbacks, ptr);
2494  }
2495 }
2496 
2497 // STL-compatible allocator.
2498 template<typename T>
2499 class VmaStlAllocator
2500 {
2501 public:
2502  const VkAllocationCallbacks* const m_pCallbacks;
2503  typedef T value_type;
2504 
2505  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
2506  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
2507 
2508  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
2509  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
2510 
2511  template<typename U>
2512  bool operator==(const VmaStlAllocator<U>& rhs) const
2513  {
2514  return m_pCallbacks == rhs.m_pCallbacks;
2515  }
2516  template<typename U>
2517  bool operator!=(const VmaStlAllocator<U>& rhs) const
2518  {
2519  return m_pCallbacks != rhs.m_pCallbacks;
2520  }
2521 
2522  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
2523 };
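/*
Editorial example: because the allocator is STL-compatible, it also works with
standard containers, routing their allocations through VkAllocationCallbacks.
Assuming `pCallbacks` is a valid const VkAllocationCallbacks* (or null for the
system default):

    VmaStlAllocator<uint32_t> alloc(pCallbacks);
    std::vector< uint32_t, VmaStlAllocator<uint32_t> > indices(alloc);
    indices.push_back(42);
*/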
2524 
2525 #if VMA_USE_STL_VECTOR
2526 
2527 #define VmaVector std::vector
2528 
2529 template<typename T, typename allocatorT>
2530 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
2531 {
2532  vec.insert(vec.begin() + index, item);
2533 }
2534 
2535 template<typename T, typename allocatorT>
2536 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
2537 {
2538  vec.erase(vec.begin() + index);
2539 }
2540 
2541 #else // #if VMA_USE_STL_VECTOR
2542 
2543 /* Class with interface compatible with subset of std::vector.
2544 T must be POD because constructors and destructors are not called and memcpy is
2545 used for these objects. */
2546 template<typename T, typename AllocatorT>
2547 class VmaVector
2548 {
2549 public:
2550  typedef T value_type;
2551 
2552  VmaVector(const AllocatorT& allocator) :
2553  m_Allocator(allocator),
2554  m_pArray(VMA_NULL),
2555  m_Count(0),
2556  m_Capacity(0)
2557  {
2558  }
2559 
2560  VmaVector(size_t count, const AllocatorT& allocator) :
2561  m_Allocator(allocator),
2562  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
2563  m_Count(count),
2564  m_Capacity(count)
2565  {
2566  }
2567 
2568  VmaVector(const VmaVector<T, AllocatorT>& src) :
2569  m_Allocator(src.m_Allocator),
2570  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
2571  m_Count(src.m_Count),
2572  m_Capacity(src.m_Count)
2573  {
2574  if(m_Count != 0)
2575  {
2576  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
2577  }
2578  }
2579 
2580  ~VmaVector()
2581  {
2582  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
2583  }
2584 
2585  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
2586  {
2587  if(&rhs != this)
2588  {
2589  resize(rhs.m_Count);
2590  if(m_Count != 0)
2591  {
2592  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
2593  }
2594  }
2595  return *this;
2596  }
2597 
2598  bool empty() const { return m_Count == 0; }
2599  size_t size() const { return m_Count; }
2600  T* data() { return m_pArray; }
2601  const T* data() const { return m_pArray; }
2602 
2603  T& operator[](size_t index)
2604  {
2605  VMA_HEAVY_ASSERT(index < m_Count);
2606  return m_pArray[index];
2607  }
2608  const T& operator[](size_t index) const
2609  {
2610  VMA_HEAVY_ASSERT(index < m_Count);
2611  return m_pArray[index];
2612  }
2613 
2614  T& front()
2615  {
2616  VMA_HEAVY_ASSERT(m_Count > 0);
2617  return m_pArray[0];
2618  }
2619  const T& front() const
2620  {
2621  VMA_HEAVY_ASSERT(m_Count > 0);
2622  return m_pArray[0];
2623  }
2624  T& back()
2625  {
2626  VMA_HEAVY_ASSERT(m_Count > 0);
2627  return m_pArray[m_Count - 1];
2628  }
2629  const T& back() const
2630  {
2631  VMA_HEAVY_ASSERT(m_Count > 0);
2632  return m_pArray[m_Count - 1];
2633  }
2634 
2635  void reserve(size_t newCapacity, bool freeMemory = false)
2636  {
2637  newCapacity = VMA_MAX(newCapacity, m_Count);
2638 
2639  if((newCapacity < m_Capacity) && !freeMemory)
2640  {
2641  newCapacity = m_Capacity;
2642  }
2643 
2644  if(newCapacity != m_Capacity)
2645  {
2646  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
2647  if(m_Count != 0)
2648  {
2649  memcpy(newArray, m_pArray, m_Count * sizeof(T));
2650  }
2651  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
2652  m_Capacity = newCapacity;
2653  m_pArray = newArray;
2654  }
2655  }
2656 
2657  void resize(size_t newCount, bool freeMemory = false)
2658  {
2659  size_t newCapacity = m_Capacity;
2660  if(newCount > m_Capacity)
2661  {
2662  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
2663  }
2664  else if(freeMemory)
2665  {
2666  newCapacity = newCount;
2667  }
2668 
2669  if(newCapacity != m_Capacity)
2670  {
2671  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
2672  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
2673  if(elementsToCopy != 0)
2674  {
2675  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
2676  }
2677  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
2678  m_Capacity = newCapacity;
2679  m_pArray = newArray;
2680  }
2681 
2682  m_Count = newCount;
2683  }
2684 
2685  void clear(bool freeMemory = false)
2686  {
2687  resize(0, freeMemory);
2688  }
2689 
2690  void insert(size_t index, const T& src)
2691  {
2692  VMA_HEAVY_ASSERT(index <= m_Count);
2693  const size_t oldCount = size();
2694  resize(oldCount + 1);
2695  if(index < oldCount)
2696  {
2697  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
2698  }
2699  m_pArray[index] = src;
2700  }
2701 
2702  void remove(size_t index)
2703  {
2704  VMA_HEAVY_ASSERT(index < m_Count);
2705  const size_t oldCount = size();
2706  if(index < oldCount - 1)
2707  {
2708  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
2709  }
2710  resize(oldCount - 1);
2711  }
2712 
2713  void push_back(const T& src)
2714  {
2715  const size_t newIndex = size();
2716  resize(newIndex + 1);
2717  m_pArray[newIndex] = src;
2718  }
2719 
2720  void pop_back()
2721  {
2722  VMA_HEAVY_ASSERT(m_Count > 0);
2723  resize(size() - 1);
2724  }
2725 
2726  void push_front(const T& src)
2727  {
2728  insert(0, src);
2729  }
2730 
2731  void pop_front()
2732  {
2733  VMA_HEAVY_ASSERT(m_Count > 0);
2734  remove(0);
2735  }
2736 
2737  typedef T* iterator;
2738 
2739  iterator begin() { return m_pArray; }
2740  iterator end() { return m_pArray + m_Count; }
2741 
2742 private:
2743  AllocatorT m_Allocator;
2744  T* m_pArray;
2745  size_t m_Count;
2746  size_t m_Capacity;
2747 };
2748 
2749 template<typename T, typename allocatorT>
2750 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
2751 {
2752  vec.insert(index, item);
2753 }
2754 
2755 template<typename T, typename allocatorT>
2756 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
2757 {
2758  vec.remove(index);
2759 }
2760 
2761 #endif // #if VMA_USE_STL_VECTOR
2762 
2763 template<typename CmpLess, typename VectorT>
2764 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
2765 {
2766  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
2767  vector.data(),
2768  vector.data() + vector.size(),
2769  value,
2770  CmpLess()) - vector.data();
2771  VmaVectorInsert(vector, indexToInsert, value);
2772  return indexToInsert;
2773 }
2774 
2775 template<typename CmpLess, typename VectorT>
2776 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
2777 {
2778  CmpLess comparator;
2779  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
2780  vector.begin(),
2781  vector.end(),
2782  value,
2783  comparator);
2784  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
2785  {
2786  size_t indexToRemove = it - vector.begin();
2787  VmaVectorRemove(vector, indexToRemove);
2788  return true;
2789  }
2790  return false;
2791 }
2792 
2793 template<typename CmpLess, typename VectorT>
2794 size_t VmaVectorFindSorted(const VectorT& vector, const typename VectorT::value_type& value)
2795 {
2796  CmpLess comparator;
2797  const typename VectorT::value_type* it = VmaBinaryFindFirstNotLess(
2798  vector.data(),
2799  vector.data() + vector.size(),
2800  value,
2801  comparator);
2802  if(it != vector.data() + vector.size() && !comparator(*it, value) && !comparator(value, *it))
2803  {
2804  return it - vector.data();
2805  }
2806  else
2807  {
2808  return vector.size();
2809  }
2810 }
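/*
Editorial example: keeping a vector sorted with these helpers, using a
hypothetical less-than functor over VkDeviceSize. `sizes` is assumed to be a
vector kept sorted in ascending order:

    struct CmpSizeLess
    {
        bool operator()(VkDeviceSize lhs, VkDeviceSize rhs) const
            { return lhs < rhs; }
    };

    VmaVectorInsertSorted<CmpSizeLess>(sizes, (VkDeviceSize)256);
    bool removed = VmaVectorRemoveSorted<CmpSizeLess>(sizes, (VkDeviceSize)256);
*/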
2811 
2813 // class VmaPoolAllocator
2814 
2815 /*
2816 Allocator for objects of type T using a list of arrays (pools) to speed up
2817 allocation. The number of elements that can be allocated is not bounded, because
2818 the allocator can create multiple blocks.
2819 */
2820 template<typename T>
2821 class VmaPoolAllocator
2822 {
2823 public:
2824  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
2825  ~VmaPoolAllocator();
2826  void Clear();
2827  T* Alloc();
2828  void Free(T* ptr);
2829 
2830 private:
2831  union Item
2832  {
2833  uint32_t NextFreeIndex;
2834  T Value;
2835  };
2836 
2837  struct ItemBlock
2838  {
2839  Item* pItems;
2840  uint32_t FirstFreeIndex;
2841  };
2842 
2843  const VkAllocationCallbacks* m_pAllocationCallbacks;
2844  size_t m_ItemsPerBlock;
2845  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
2846 
2847  ItemBlock& CreateNewBlock();
2848 };
2849 
2850 template<typename T>
2851 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
2852  m_pAllocationCallbacks(pAllocationCallbacks),
2853  m_ItemsPerBlock(itemsPerBlock),
2854  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
2855 {
2856  VMA_ASSERT(itemsPerBlock > 0);
2857 }
2858 
2859 template<typename T>
2860 VmaPoolAllocator<T>::~VmaPoolAllocator()
2861 {
2862  Clear();
2863 }
2864 
2865 template<typename T>
2866 void VmaPoolAllocator<T>::Clear()
2867 {
2868  for(size_t i = m_ItemBlocks.size(); i--; )
2869  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
2870  m_ItemBlocks.clear();
2871 }
2872 
2873 template<typename T>
2874 T* VmaPoolAllocator<T>::Alloc()
2875 {
2876  for(size_t i = m_ItemBlocks.size(); i--; )
2877  {
2878  ItemBlock& block = m_ItemBlocks[i];
2879  // This block has some free items: Use the first one.
2880  if(block.FirstFreeIndex != UINT32_MAX)
2881  {
2882  Item* const pItem = &block.pItems[block.FirstFreeIndex];
2883  block.FirstFreeIndex = pItem->NextFreeIndex;
2884  return &pItem->Value;
2885  }
2886  }
2887 
2888  // No block has a free item: Create a new one and use it.
2889  ItemBlock& newBlock = CreateNewBlock();
2890  Item* const pItem = &newBlock.pItems[0];
2891  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
2892  return &pItem->Value;
2893 }
2894 
2895 template<typename T>
2896 void VmaPoolAllocator<T>::Free(T* ptr)
2897 {
2898  // Search all memory blocks to find ptr.
2899  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
2900  {
2901  ItemBlock& block = m_ItemBlocks[i];
2902 
2903  // Casting to union.
2904  Item* pItemPtr;
2905  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
2906 
2907  // Check if pItemPtr is in address range of this block.
2908  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
2909  {
2910  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
2911  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
2912  block.FirstFreeIndex = index;
2913  return;
2914  }
2915  }
2916  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
2917 }
2918 
2919 template<typename T>
2920 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
2921 {
2922  ItemBlock newBlock = {
2923  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
2924 
2925  m_ItemBlocks.push_back(newBlock);
2926 
2927  // Setup singly-linked list of all free items in this block.
2928  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
2929  newBlock.pItems[i].NextFreeIndex = i + 1;
2930  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
2931  return m_ItemBlocks.back();
2932 }
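/*
Editorial example: typical use of the pool allocator. Note that Alloc() does
not run T's constructor and Free() does not run its destructor - for POD items
plain assignment suffices, which is how VmaRawList below uses it. `MyItem` is
a hypothetical POD type:

    VmaPoolAllocator<MyItem> itemAllocator(pCallbacks, 128); // 128 items per block
    MyItem* item = itemAllocator.Alloc();
    item->value = 7;
    itemAllocator.Free(item);
*/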
2933 
2935 // class VmaRawList, VmaList
2936 
2937 #if VMA_USE_STL_LIST
2938 
2939 #define VmaList std::list
2940 
2941 #else // #if VMA_USE_STL_LIST
2942 
2943 template<typename T>
2944 struct VmaListItem
2945 {
2946  VmaListItem* pPrev;
2947  VmaListItem* pNext;
2948  T Value;
2949 };
2950 
2951 // Doubly linked list.
2952 template<typename T>
2953 class VmaRawList
2954 {
2955 public:
2956  typedef VmaListItem<T> ItemType;
2957 
2958  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
2959  ~VmaRawList();
2960  void Clear();
2961 
2962  size_t GetCount() const { return m_Count; }
2963  bool IsEmpty() const { return m_Count == 0; }
2964 
2965  ItemType* Front() { return m_pFront; }
2966  const ItemType* Front() const { return m_pFront; }
2967  ItemType* Back() { return m_pBack; }
2968  const ItemType* Back() const { return m_pBack; }
2969 
2970  ItemType* PushBack();
2971  ItemType* PushFront();
2972  ItemType* PushBack(const T& value);
2973  ItemType* PushFront(const T& value);
2974  void PopBack();
2975  void PopFront();
2976 
2977  // Item can be null - it means PushBack.
2978  ItemType* InsertBefore(ItemType* pItem);
2979  // Item can be null - it means PushFront.
2980  ItemType* InsertAfter(ItemType* pItem);
2981 
2982  ItemType* InsertBefore(ItemType* pItem, const T& value);
2983  ItemType* InsertAfter(ItemType* pItem, const T& value);
2984 
2985  void Remove(ItemType* pItem);
2986 
2987 private:
2988  const VkAllocationCallbacks* const m_pAllocationCallbacks;
2989  VmaPoolAllocator<ItemType> m_ItemAllocator;
2990  ItemType* m_pFront;
2991  ItemType* m_pBack;
2992  size_t m_Count;
2993 
2994  // Declared but not defined, to block the copy constructor and assignment operator.
2995  VmaRawList(const VmaRawList<T>& src);
2996  VmaRawList<T>& operator=(const VmaRawList<T>& rhs);
2997 };
2998 
2999 template<typename T>
3000 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
3001  m_pAllocationCallbacks(pAllocationCallbacks),
3002  m_ItemAllocator(pAllocationCallbacks, 128),
3003  m_pFront(VMA_NULL),
3004  m_pBack(VMA_NULL),
3005  m_Count(0)
3006 {
3007 }
3008 
3009 template<typename T>
3010 VmaRawList<T>::~VmaRawList()
3011 {
3012  // Intentionally not calling Clear, because that would do unnecessary work
3013  // returning all items to m_ItemAllocator as free just before it is destroyed.
3014 }
3015 
3016 template<typename T>
3017 void VmaRawList<T>::Clear()
3018 {
3019  if(IsEmpty() == false)
3020  {
3021  ItemType* pItem = m_pBack;
3022  while(pItem != VMA_NULL)
3023  {
3024  ItemType* const pPrevItem = pItem->pPrev;
3025  m_ItemAllocator.Free(pItem);
3026  pItem = pPrevItem;
3027  }
3028  m_pFront = VMA_NULL;
3029  m_pBack = VMA_NULL;
3030  m_Count = 0;
3031  }
3032 }
3033 
3034 template<typename T>
3035 VmaListItem<T>* VmaRawList<T>::PushBack()
3036 {
3037  ItemType* const pNewItem = m_ItemAllocator.Alloc();
3038  pNewItem->pNext = VMA_NULL;
3039  if(IsEmpty())
3040  {
3041  pNewItem->pPrev = VMA_NULL;
3042  m_pFront = pNewItem;
3043  m_pBack = pNewItem;
3044  m_Count = 1;
3045  }
3046  else
3047  {
3048  pNewItem->pPrev = m_pBack;
3049  m_pBack->pNext = pNewItem;
3050  m_pBack = pNewItem;
3051  ++m_Count;
3052  }
3053  return pNewItem;
3054 }
3055 
3056 template<typename T>
3057 VmaListItem<T>* VmaRawList<T>::PushFront()
3058 {
3059  ItemType* const pNewItem = m_ItemAllocator.Alloc();
3060  pNewItem->pPrev = VMA_NULL;
3061  if(IsEmpty())
3062  {
3063  pNewItem->pNext = VMA_NULL;
3064  m_pFront = pNewItem;
3065  m_pBack = pNewItem;
3066  m_Count = 1;
3067  }
3068  else
3069  {
3070  pNewItem->pNext = m_pFront;
3071  m_pFront->pPrev = pNewItem;
3072  m_pFront = pNewItem;
3073  ++m_Count;
3074  }
3075  return pNewItem;
3076 }
3077 
3078 template<typename T>
3079 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
3080 {
3081  ItemType* const pNewItem = PushBack();
3082  pNewItem->Value = value;
3083  return pNewItem;
3084 }
3085 
3086 template<typename T>
3087 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
3088 {
3089  ItemType* const pNewItem = PushFront();
3090  pNewItem->Value = value;
3091  return pNewItem;
3092 }
3093 
3094 template<typename T>
3095 void VmaRawList<T>::PopBack()
3096 {
3097  VMA_HEAVY_ASSERT(m_Count > 0);
3098  ItemType* const pBackItem = m_pBack;
3099  ItemType* const pPrevItem = pBackItem->pPrev;
3100  if(pPrevItem != VMA_NULL)
3101  {
3102  pPrevItem->pNext = VMA_NULL;
3103  }
3104  m_pBack = pPrevItem;
3105  m_ItemAllocator.Free(pBackItem);
3106  --m_Count;
3107 }
3108 
3109 template<typename T>
3110 void VmaRawList<T>::PopFront()
3111 {
3112  VMA_HEAVY_ASSERT(m_Count > 0);
3113  ItemType* const pFrontItem = m_pFront;
3114  ItemType* const pNextItem = pFrontItem->pNext;
3115  if(pNextItem != VMA_NULL)
3116  {
3117  pNextItem->pPrev = VMA_NULL;
3118  }
3119  m_pFront = pNextItem;
3120  m_ItemAllocator.Free(pFrontItem);
3121  --m_Count;
3122 }
3123 
3124 template<typename T>
3125 void VmaRawList<T>::Remove(ItemType* pItem)
3126 {
3127  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
3128  VMA_HEAVY_ASSERT(m_Count > 0);
3129 
3130  if(pItem->pPrev != VMA_NULL)
3131  {
3132  pItem->pPrev->pNext = pItem->pNext;
3133  }
3134  else
3135  {
3136  VMA_HEAVY_ASSERT(m_pFront == pItem);
3137  m_pFront = pItem->pNext;
3138  }
3139 
3140  if(pItem->pNext != VMA_NULL)
3141  {
3142  pItem->pNext->pPrev = pItem->pPrev;
3143  }
3144  else
3145  {
3146  VMA_HEAVY_ASSERT(m_pBack == pItem);
3147  m_pBack = pItem->pPrev;
3148  }
3149 
3150  m_ItemAllocator.Free(pItem);
3151  --m_Count;
3152 }
3153 
3154 template<typename T>
3155 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
3156 {
3157  if(pItem != VMA_NULL)
3158  {
3159  ItemType* const prevItem = pItem->pPrev;
3160  ItemType* const newItem = m_ItemAllocator.Alloc();
3161  newItem->pPrev = prevItem;
3162  newItem->pNext = pItem;
3163  pItem->pPrev = newItem;
3164  if(prevItem != VMA_NULL)
3165  {
3166  prevItem->pNext = newItem;
3167  }
3168  else
3169  {
3170  VMA_HEAVY_ASSERT(m_pFront == pItem);
3171  m_pFront = newItem;
3172  }
3173  ++m_Count;
3174  return newItem;
3175  }
3176  else
3177  return PushBack();
3178 }
3179 
3180 template<typename T>
3181 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
3182 {
3183  if(pItem != VMA_NULL)
3184  {
3185  ItemType* const nextItem = pItem->pNext;
3186  ItemType* const newItem = m_ItemAllocator.Alloc();
3187  newItem->pNext = nextItem;
3188  newItem->pPrev = pItem;
3189  pItem->pNext = newItem;
3190  if(nextItem != VMA_NULL)
3191  {
3192  nextItem->pPrev = newItem;
3193  }
3194  else
3195  {
3196  VMA_HEAVY_ASSERT(m_pBack == pItem);
3197  m_pBack = newItem;
3198  }
3199  ++m_Count;
3200  return newItem;
3201  }
3202  else
3203  return PushFront();
3204 }
3205 
3206 template<typename T>
3207 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
3208 {
3209  ItemType* const newItem = InsertBefore(pItem);
3210  newItem->Value = value;
3211  return newItem;
3212 }
3213 
3214 template<typename T>
3215 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
3216 {
3217  ItemType* const newItem = InsertAfter(pItem);
3218  newItem->Value = value;
3219  return newItem;
3220 }
3221 
3222 template<typename T, typename AllocatorT>
3223 class VmaList
3224 {
3225 public:
3226  class iterator
3227  {
3228  public:
3229  iterator() :
3230  m_pList(VMA_NULL),
3231  m_pItem(VMA_NULL)
3232  {
3233  }
3234 
3235  T& operator*() const
3236  {
3237  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
3238  return m_pItem->Value;
3239  }
3240  T* operator->() const
3241  {
3242  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
3243  return &m_pItem->Value;
3244  }
3245 
3246  iterator& operator++()
3247  {
3248  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
3249  m_pItem = m_pItem->pNext;
3250  return *this;
3251  }
3252  iterator& operator--()
3253  {
3254  if(m_pItem != VMA_NULL)
3255  {
3256  m_pItem = m_pItem->pPrev;
3257  }
3258  else
3259  {
3260  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
3261  m_pItem = m_pList->Back();
3262  }
3263  return *this;
3264  }
3265 
3266  iterator operator++(int)
3267  {
3268  iterator result = *this;
3269  ++*this;
3270  return result;
3271  }
3272  iterator operator--(int)
3273  {
3274  iterator result = *this;
3275  --*this;
3276  return result;
3277  }
3278 
3279  bool operator==(const iterator& rhs) const
3280  {
3281  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
3282  return m_pItem == rhs.m_pItem;
3283  }
3284  bool operator!=(const iterator& rhs) const
3285  {
3286  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
3287  return m_pItem != rhs.m_pItem;
3288  }
3289 
3290  private:
3291  VmaRawList<T>* m_pList;
3292  VmaListItem<T>* m_pItem;
3293 
3294  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
3295  m_pList(pList),
3296  m_pItem(pItem)
3297  {
3298  }
3299 
3300  friend class VmaList<T, AllocatorT>;
3301  };
3302 
3303  class const_iterator
3304  {
3305  public:
3306  const_iterator() :
3307  m_pList(VMA_NULL),
3308  m_pItem(VMA_NULL)
3309  {
3310  }
3311 
3312  const_iterator(const iterator& src) :
3313  m_pList(src.m_pList),
3314  m_pItem(src.m_pItem)
3315  {
3316  }
3317 
3318  const T& operator*() const
3319  {
3320  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
3321  return m_pItem->Value;
3322  }
3323  const T* operator->() const
3324  {
3325  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
3326  return &m_pItem->Value;
3327  }
3328 
3329  const_iterator& operator++()
3330  {
3331  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
3332  m_pItem = m_pItem->pNext;
3333  return *this;
3334  }
3335  const_iterator& operator--()
3336  {
3337  if(m_pItem != VMA_NULL)
3338  {
3339  m_pItem = m_pItem->pPrev;
3340  }
3341  else
3342  {
3343  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
3344  m_pItem = m_pList->Back();
3345  }
3346  return *this;
3347  }
3348 
3349  const_iterator operator++(int)
3350  {
3351  const_iterator result = *this;
3352  ++*this;
3353  return result;
3354  }
3355  const_iterator operator--(int)
3356  {
3357  const_iterator result = *this;
3358  --*this;
3359  return result;
3360  }
3361 
3362  bool operator==(const const_iterator& rhs) const
3363  {
3364  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
3365  return m_pItem == rhs.m_pItem;
3366  }
3367  bool operator!=(const const_iterator& rhs) const
3368  {
3369  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
3370  return m_pItem != rhs.m_pItem;
3371  }
3372 
3373  private:
3374  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
3375  m_pList(pList),
3376  m_pItem(pItem)
3377  {
3378  }
3379 
3380  const VmaRawList<T>* m_pList;
3381  const VmaListItem<T>* m_pItem;
3382 
3383  friend class VmaList<T, AllocatorT>;
3384  };
3385 
3386  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
3387 
3388  bool empty() const { return m_RawList.IsEmpty(); }
3389  size_t size() const { return m_RawList.GetCount(); }
3390 
3391  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
3392  iterator end() { return iterator(&m_RawList, VMA_NULL); }
3393 
3394  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
3395  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
3396 
3397  void clear() { m_RawList.Clear(); }
3398  void push_back(const T& value) { m_RawList.PushBack(value); }
3399  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
3400  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
3401 
3402 private:
3403  VmaRawList<T> m_RawList;
3404 };
3405 
3406 #endif // #if VMA_USE_STL_LIST
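/*
Editorial example: thanks to the matching interfaces, the same code compiles
against both std::list and the custom VmaList, which is how the
VmaSuballocationList type defined later is used. Assuming valid `pCallbacks`:

    VmaStlAllocator<int> alloc(pCallbacks);
    VmaList< int, VmaStlAllocator<int> > list(alloc);
    list.push_back(7);
    for(VmaList< int, VmaStlAllocator<int> >::iterator it = list.begin();
        it != list.end();
        ++it)
    {
        int value = *it; // value == 7
    }
*/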
3407 
3409 // class VmaMap
3410 
3411 // Unused in this version.
3412 #if 0
3413 
3414 #if VMA_USE_STL_UNORDERED_MAP
3415 
3416 #define VmaPair std::pair
3417 
3418 #define VMA_MAP_TYPE(KeyT, ValueT) \
3419  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
3420 
3421 #else // #if VMA_USE_STL_UNORDERED_MAP
3422 
3423 template<typename T1, typename T2>
3424 struct VmaPair
3425 {
3426  T1 first;
3427  T2 second;
3428 
3429  VmaPair() : first(), second() { }
3430  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
3431 };
3432 
3433 /* Class compatible with subset of interface of std::unordered_map.
3434 KeyT, ValueT must be POD because they will be stored in VmaVector.
3435 */
3436 template<typename KeyT, typename ValueT>
3437 class VmaMap
3438 {
3439 public:
3440  typedef VmaPair<KeyT, ValueT> PairType;
3441  typedef PairType* iterator;
3442 
3443  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
3444 
3445  iterator begin() { return m_Vector.begin(); }
3446  iterator end() { return m_Vector.end(); }
3447 
3448  void insert(const PairType& pair);
3449  iterator find(const KeyT& key);
3450  void erase(iterator it);
3451 
3452 private:
3453  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
3454 };
3455 
3456 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
3457 
3458 template<typename FirstT, typename SecondT>
3459 struct VmaPairFirstLess
3460 {
3461  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
3462  {
3463  return lhs.first < rhs.first;
3464  }
3465  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
3466  {
3467  return lhs.first < rhsFirst;
3468  }
3469 };
3470 
3471 template<typename KeyT, typename ValueT>
3472 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
3473 {
3474  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
3475  m_Vector.data(),
3476  m_Vector.data() + m_Vector.size(),
3477  pair,
3478  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
3479  VmaVectorInsert(m_Vector, indexToInsert, pair);
3480 }
3481 
3482 template<typename KeyT, typename ValueT>
3483 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
3484 {
3485  PairType* it = VmaBinaryFindFirstNotLess(
3486  m_Vector.data(),
3487  m_Vector.data() + m_Vector.size(),
3488  key,
3489  VmaPairFirstLess<KeyT, ValueT>());
3490  if((it != m_Vector.end()) && (it->first == key))
3491  {
3492  return it;
3493  }
3494  else
3495  {
3496  return m_Vector.end();
3497  }
3498 }
3499 
3500 template<typename KeyT, typename ValueT>
3501 void VmaMap<KeyT, ValueT>::erase(iterator it)
3502 {
3503  VmaVectorRemove(m_Vector, it - m_Vector.begin());
3504 }
3505 
3506 #endif // #if VMA_USE_STL_UNORDERED_MAP
3507 
3508 #endif // #if 0
3509 
3511 
3512 class VmaDeviceMemoryBlock;
3513 
3514 struct VmaAllocation_T
3515 {
3516 private:
3517  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
3518 
3519  enum FLAGS
3520  {
3521  FLAG_USER_DATA_STRING = 0x01,
3522  };
3523 
3524 public:
3525  enum ALLOCATION_TYPE
3526  {
3527  ALLOCATION_TYPE_NONE,
3528  ALLOCATION_TYPE_BLOCK,
3529  ALLOCATION_TYPE_DEDICATED,
3530  };
3531 
3532  VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
3533  m_Alignment(1),
3534  m_Size(0),
3535  m_pUserData(VMA_NULL),
3536  m_LastUseFrameIndex(currentFrameIndex),
3537  m_Type((uint8_t)ALLOCATION_TYPE_NONE),
3538  m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
3539  m_MapCount(0),
3540  m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
3541  {
3542  }
3543 
3544  ~VmaAllocation_T()
3545  {
3546  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
3547 
3548  // Check if owned string was freed.
3549  VMA_ASSERT(m_pUserData == VMA_NULL);
3550  }
3551 
3552  void InitBlockAllocation(
3553  VmaPool hPool,
3554  VmaDeviceMemoryBlock* block,
3555  VkDeviceSize offset,
3556  VkDeviceSize alignment,
3557  VkDeviceSize size,
3558  VmaSuballocationType suballocationType,
3559  bool mapped,
3560  bool canBecomeLost)
3561  {
3562  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
3563  VMA_ASSERT(block != VMA_NULL);
3564  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
3565  m_Alignment = alignment;
3566  m_Size = size;
3567  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
3568  m_SuballocationType = (uint8_t)suballocationType;
3569  m_BlockAllocation.m_hPool = hPool;
3570  m_BlockAllocation.m_Block = block;
3571  m_BlockAllocation.m_Offset = offset;
3572  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
3573  }
3574 
3575  void InitLost()
3576  {
3577  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
3578  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
3579  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
3580  m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
3581  m_BlockAllocation.m_Block = VMA_NULL;
3582  m_BlockAllocation.m_Offset = 0;
3583  m_BlockAllocation.m_CanBecomeLost = true;
3584  }
3585 
3586  void ChangeBlockAllocation(
3587  VmaAllocator hAllocator,
3588  VmaDeviceMemoryBlock* block,
3589  VkDeviceSize offset);
3590 
3591  // A non-null pMappedData means the allocation is created with the MAPPED flag.
3592  void InitDedicatedAllocation(
3593  uint32_t memoryTypeIndex,
3594  VkDeviceMemory hMemory,
3595  VmaSuballocationType suballocationType,
3596  void* pMappedData,
3597  VkDeviceSize size)
3598  {
3599  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
3600  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
3601  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
3602  m_Alignment = 0;
3603  m_Size = size;
3604  m_SuballocationType = (uint8_t)suballocationType;
3605  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
3606  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
3607  m_DedicatedAllocation.m_hMemory = hMemory;
3608  m_DedicatedAllocation.m_pMappedData = pMappedData;
3609  }
3610 
3611  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
3612  VkDeviceSize GetAlignment() const { return m_Alignment; }
3613  VkDeviceSize GetSize() const { return m_Size; }
3614  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
3615  void* GetUserData() const { return m_pUserData; }
3616  void SetUserData(VmaAllocator hAllocator, void* pUserData);
3617  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
3618 
3619  VmaDeviceMemoryBlock* GetBlock() const
3620  {
3621  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
3622  return m_BlockAllocation.m_Block;
3623  }
3624  VkDeviceSize GetOffset() const;
3625  VkDeviceMemory GetMemory() const;
3626  uint32_t GetMemoryTypeIndex() const;
3627  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
3628  void* GetMappedData() const;
3629  bool CanBecomeLost() const;
3630  VmaPool GetPool() const;
3631 
3632  uint32_t GetLastUseFrameIndex() const
3633  {
3634  return m_LastUseFrameIndex.load();
3635  }
3636  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
3637  {
3638  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
3639  }
3640  /*
3641  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
3642  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
3643  - Else, returns false.
3644 
3645  If hAllocation is already lost, assert - you should not call it then.
3646  If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
3647  */
3648  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
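/*
Editorial worked example: with currentFrameIndex = 100 and frameInUseCount = 2,
an allocation whose LastUseFrameIndex is 97 satisfies 97 + 2 < 100, so
MakeLost() succeeds; one last used in frame 98 does not (98 + 2 == 100), so
MakeLost() returns false.
*/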
3649 
3650  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
3651  {
3652  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
3653  outInfo.blockCount = 1;
3654  outInfo.allocationCount = 1;
3655  outInfo.unusedRangeCount = 0;
3656  outInfo.usedBytes = m_Size;
3657  outInfo.unusedBytes = 0;
3658  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
3659  outInfo.unusedRangeSizeMin = UINT64_MAX;
3660  outInfo.unusedRangeSizeMax = 0;
3661  }
3662 
3663  void BlockAllocMap();
3664  void BlockAllocUnmap();
3665  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
3666  void DedicatedAllocUnmap(VmaAllocator hAllocator);
3667 
3668 private:
3669  VkDeviceSize m_Alignment;
3670  VkDeviceSize m_Size;
3671  void* m_pUserData;
3672  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
3673  uint8_t m_Type; // ALLOCATION_TYPE
3674  uint8_t m_SuballocationType; // VmaSuballocationType
3675  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
3676  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
3677  uint8_t m_MapCount;
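// Illustrative encoding (hypothetical value, not from the original source):
// m_MapCount == 0x82 means the allocation was created persistently mapped
// (bit 0x80 set) and currently holds 2 additional vmaMapMemory() references
// (0x82 & 0x7F == 2).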
3678  uint8_t m_Flags; // enum FLAGS
3679 
3680  // Allocation out of VmaDeviceMemoryBlock.
3681  struct BlockAllocation
3682  {
3683  VmaPool m_hPool; // Null if belongs to general memory.
3684  VmaDeviceMemoryBlock* m_Block;
3685  VkDeviceSize m_Offset;
3686  bool m_CanBecomeLost;
3687  };
3688 
3689  // Allocation for an object that has its own private VkDeviceMemory.
3690  struct DedicatedAllocation
3691  {
3692  uint32_t m_MemoryTypeIndex;
3693  VkDeviceMemory m_hMemory;
3694  void* m_pMappedData; // Not null means memory is mapped.
3695  };
3696 
3697  union
3698  {
3699  // Allocation out of VmaDeviceMemoryBlock.
3700  BlockAllocation m_BlockAllocation;
3701  // Allocation for an object that has its own private VkDeviceMemory.
3702  DedicatedAllocation m_DedicatedAllocation;
3703  };
3704 
3705  void FreeUserDataString(VmaAllocator hAllocator);
3706 };
3707 
3708 /*
3709 Represents a region of VmaDeviceMemoryBlock that is either assigned and returned
3710 as an allocated memory block, or is free.
3711 */
3712 struct VmaSuballocation
3713 {
3714  VkDeviceSize offset;
3715  VkDeviceSize size;
3716  VmaAllocation hAllocation;
3717  VmaSuballocationType type;
3718 };
3719 
3720 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
3721 
3722 // Cost of one additional allocation lost, expressed as an equivalent number of bytes.
3723 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
3724 
3725 /*
3726 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
3727 
3728 If canMakeOtherLost was false:
3729 - item points to a FREE suballocation.
3730 - itemsToMakeLostCount is 0.
3731 
3732 If canMakeOtherLost was true:
3733 - item points to first of sequence of suballocations, which are either FREE,
3734  or point to VmaAllocations that can become lost.
3735 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
3736  the requested allocation to succeed.
3737 */
3738 struct VmaAllocationRequest
3739 {
3740  VkDeviceSize offset;
3741  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
3742  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
3743  VmaSuballocationList::iterator item;
3744  size_t itemsToMakeLostCount;
3745 
3746  VkDeviceSize CalcCost() const
3747  {
3748  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
3749  }
3750 };
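// Illustrative cost comparison (hypothetical numbers, not from the original
// source): a request with sumItemSize = 262144 that must make 2 allocations
// lost has CalcCost() = 262144 + 2 * 1048576 = 2359296, while a request that
// fits entirely in free space (sumItemSize = 0, itemsToMakeLostCount = 0)
// costs 0, so the search code below always prefers requests that lose nothing.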
3751 
3752 /*
3753 Data structure used for bookkeeping of allocations and unused ranges of memory
3754 in a single VkDeviceMemory block.
3755 */
3756 class VmaBlockMetadata
3757 {
3758 public:
3759  VmaBlockMetadata(VmaAllocator hAllocator);
3760  ~VmaBlockMetadata();
3761  void Init(VkDeviceSize size);
3762 
3763  // Validates all data structures inside this object. If not valid, returns false.
3764  bool Validate() const;
3765  VkDeviceSize GetSize() const { return m_Size; }
3766  size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
3767  VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
3768  VkDeviceSize GetUnusedRangeSizeMax() const;
3769  // Returns true if this block is empty - contains only single free suballocation.
3770  bool IsEmpty() const;
3771 
3772  void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
3773  void AddPoolStats(VmaPoolStats& inoutStats) const;
3774 
3775 #if VMA_STATS_STRING_ENABLED
3776  void PrintDetailedMap(class VmaJsonWriter& json) const;
3777 #endif
3778 
3779  // Creates trivial request for case when block is empty.
3780  void CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest);
3781 
3782  // Tries to find a place for suballocation with given parameters inside this block.
3783  // If succeeded, fills pAllocationRequest and returns true.
3784  // If failed, returns false.
3785  bool CreateAllocationRequest(
3786  uint32_t currentFrameIndex,
3787  uint32_t frameInUseCount,
3788  VkDeviceSize bufferImageGranularity,
3789  VkDeviceSize allocSize,
3790  VkDeviceSize allocAlignment,
3791  VmaSuballocationType allocType,
3792  bool canMakeOtherLost,
3793  VmaAllocationRequest* pAllocationRequest);
3794 
3795  bool MakeRequestedAllocationsLost(
3796  uint32_t currentFrameIndex,
3797  uint32_t frameInUseCount,
3798  VmaAllocationRequest* pAllocationRequest);
3799 
3800  uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
3801 
3802  // Makes actual allocation based on request. Request must already be checked and valid.
3803  void Alloc(
3804  const VmaAllocationRequest& request,
3805  VmaSuballocationType type,
3806  VkDeviceSize allocSize,
3807  VmaAllocation hAllocation);
3808 
3809  // Frees suballocation assigned to given memory region.
3810  void Free(const VmaAllocation allocation);
3811  void FreeAtOffset(VkDeviceSize offset);
3812 
3813 private:
3814  VkDeviceSize m_Size;
3815  uint32_t m_FreeCount;
3816  VkDeviceSize m_SumFreeSize;
3817  VmaSuballocationList m_Suballocations;
3818  // Suballocations that are free and have size greater than certain threshold.
3819  // Sorted by size, ascending.
3820  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
3821 
3822  bool ValidateFreeSuballocationList() const;
3823 
3824 // Checks if a requested suballocation with given parameters can be placed at given suballocItem.
3825  // If yes, fills pOffset and returns true. If no, returns false.
3826  bool CheckAllocation(
3827  uint32_t currentFrameIndex,
3828  uint32_t frameInUseCount,
3829  VkDeviceSize bufferImageGranularity,
3830  VkDeviceSize allocSize,
3831  VkDeviceSize allocAlignment,
3832  VmaSuballocationType allocType,
3833  VmaSuballocationList::const_iterator suballocItem,
3834  bool canMakeOtherLost,
3835  VkDeviceSize* pOffset,
3836  size_t* itemsToMakeLostCount,
3837  VkDeviceSize* pSumFreeSize,
3838  VkDeviceSize* pSumItemSize) const;
3839 // Given a free suballocation, merges it with the following one, which must also be free.
3840  void MergeFreeWithNext(VmaSuballocationList::iterator item);
3841  // Releases given suballocation, making it free.
3842  // Merges it with adjacent free suballocations if applicable.
3843  // Returns iterator to new free suballocation at this place.
3844  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
3845 // Given a free suballocation, inserts it into the sorted list
3846 // m_FreeSuballocationsBySize if it is suitable.
3847  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
3848 // Given a free suballocation, removes it from the sorted list
3849 // m_FreeSuballocationsBySize if it is suitable.
3850  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
3851 };
3852 
3853 // Helper class that represents mapped memory. Synchronized internally.
3854 class VmaDeviceMemoryMapping
3855 {
3856 public:
3857  VmaDeviceMemoryMapping();
3858  ~VmaDeviceMemoryMapping();
3859 
3860  void* GetMappedData() const { return m_pMappedData; }
3861 
3862  // ppData can be null.
3863  VkResult Map(VmaAllocator hAllocator, VkDeviceMemory hMemory, uint32_t count, void **ppData);
3864  void Unmap(VmaAllocator hAllocator, VkDeviceMemory hMemory, uint32_t count);
3865 
3866 private:
3867  VMA_MUTEX m_Mutex;
3868  uint32_t m_MapCount;
3869  void* m_pMappedData;
3870 };
3871 
3872 /*
3873 Represents a single block of device memory (`VkDeviceMemory`) with all the
3874 data about its regions (aka suballocations, `VmaAllocation`), assigned and free.
3875 
3876 Thread-safety: This class must be externally synchronized.
3877 */
3878 class VmaDeviceMemoryBlock
3879 {
3880 public:
3881  uint32_t m_MemoryTypeIndex;
3882  VkDeviceMemory m_hMemory;
3883  VmaDeviceMemoryMapping m_Mapping;
3884  VmaBlockMetadata m_Metadata;
3885 
3886  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
3887 
3888  ~VmaDeviceMemoryBlock()
3889  {
3890  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
3891  }
3892 
3893  // Always call after construction.
3894  void Init(
3895  uint32_t newMemoryTypeIndex,
3896  VkDeviceMemory newMemory,
3897  VkDeviceSize newSize);
3898  // Always call before destruction.
3899  void Destroy(VmaAllocator allocator);
3900 
3901  // Validates all data structures inside this object. If not valid, returns false.
3902  bool Validate() const;
3903 
3904  // ppData can be null.
3905  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
3906  void Unmap(VmaAllocator hAllocator, uint32_t count);
3907 };
3908 
3909 struct VmaPointerLess
3910 {
3911  bool operator()(const void* lhs, const void* rhs) const
3912  {
3913  return lhs < rhs;
3914  }
3915 };
3916 
3917 class VmaDefragmentator;
3918 
3919 /*
3920 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
3921 Vulkan memory type.
3922 
3923 Synchronized internally with a mutex.
3924 */
3925 struct VmaBlockVector
3926 {
3927  VmaBlockVector(
3928  VmaAllocator hAllocator,
3929  uint32_t memoryTypeIndex,
3930  VkDeviceSize preferredBlockSize,
3931  size_t minBlockCount,
3932  size_t maxBlockCount,
3933  VkDeviceSize bufferImageGranularity,
3934  uint32_t frameInUseCount,
3935  bool isCustomPool);
3936  ~VmaBlockVector();
3937 
3938  VkResult CreateMinBlocks();
3939 
3940  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
3941  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
3942  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
3943  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
3944 
3945  void GetPoolStats(VmaPoolStats* pStats);
3946 
3947  bool IsEmpty() const { return m_Blocks.empty(); }
3948 
3949  VkResult Allocate(
3950  VmaPool hCurrentPool,
3951  uint32_t currentFrameIndex,
3952  const VkMemoryRequirements& vkMemReq,
3953  const VmaAllocationCreateInfo& createInfo,
3954  VmaSuballocationType suballocType,
3955  VmaAllocation* pAllocation);
3956 
3957  void Free(
3958  VmaAllocation hAllocation);
3959 
3960  // Adds statistics of this BlockVector to pStats.
3961  void AddStats(VmaStats* pStats);
3962 
3963 #if VMA_STATS_STRING_ENABLED
3964  void PrintDetailedMap(class VmaJsonWriter& json);
3965 #endif
3966 
3967  void MakePoolAllocationsLost(
3968  uint32_t currentFrameIndex,
3969  size_t* pLostAllocationCount);
3970 
3971  VmaDefragmentator* EnsureDefragmentator(
3972  VmaAllocator hAllocator,
3973  uint32_t currentFrameIndex);
3974 
3975  VkResult Defragment(
3976  VmaDefragmentationStats* pDefragmentationStats,
3977  VkDeviceSize& maxBytesToMove,
3978  uint32_t& maxAllocationsToMove);
3979 
3980  void DestroyDefragmentator();
3981 
3982 private:
3983  friend class VmaDefragmentator;
3984 
3985  const VmaAllocator m_hAllocator;
3986  const uint32_t m_MemoryTypeIndex;
3987  const VkDeviceSize m_PreferredBlockSize;
3988  const size_t m_MinBlockCount;
3989  const size_t m_MaxBlockCount;
3990  const VkDeviceSize m_BufferImageGranularity;
3991  const uint32_t m_FrameInUseCount;
3992  const bool m_IsCustomPool;
3993  VMA_MUTEX m_Mutex;
3994  // Incrementally sorted by sumFreeSize, ascending.
3995  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
3996  /* There can be at most one memory block that is completely empty - a
3997  hysteresis to avoid the pessimistic case of alternating creation and destruction
3998  of a VkDeviceMemory. */
3999  bool m_HasEmptyBlock;
4000  VmaDefragmentator* m_pDefragmentator;
4001 
4002  size_t CalcMaxBlockSize() const;
4003 
4004  // Finds and removes given block from vector.
4005  void Remove(VmaDeviceMemoryBlock* pBlock);
4006 
4007  // Performs single step in sorting m_Blocks. They may not be fully sorted
4008  // after this call.
4009  void IncrementallySortBlocks();
4010 
4011  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
4012 };
4013 
4014 struct VmaPool_T
4015 {
4016 public:
4017  VmaBlockVector m_BlockVector;
4018 
4019  // Takes ownership.
4020  VmaPool_T(
4021  VmaAllocator hAllocator,
4022  const VmaPoolCreateInfo& createInfo);
4023  ~VmaPool_T();
4024 
4025  VmaBlockVector& GetBlockVector() { return m_BlockVector; }
4026 
4027 #if VMA_STATS_STRING_ENABLED
4028  //void PrintDetailedMap(class VmaStringBuilder& sb);
4029 #endif
4030 };
4031 
4032 class VmaDefragmentator
4033 {
4034  const VmaAllocator m_hAllocator;
4035  VmaBlockVector* const m_pBlockVector;
4036  uint32_t m_CurrentFrameIndex;
4037  VkDeviceSize m_BytesMoved;
4038  uint32_t m_AllocationsMoved;
4039 
4040  struct AllocationInfo
4041  {
4042  VmaAllocation m_hAllocation;
4043  VkBool32* m_pChanged;
4044 
4045  AllocationInfo() :
4046  m_hAllocation(VK_NULL_HANDLE),
4047  m_pChanged(VMA_NULL)
4048  {
4049  }
4050  };
4051 
4052  struct AllocationInfoSizeGreater
4053  {
4054  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
4055  {
4056  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
4057  }
4058  };
4059 
4060  // Used between AddAllocation and Defragment.
4061  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
4062 
4063  struct BlockInfo
4064  {
4065  VmaDeviceMemoryBlock* m_pBlock;
4066  bool m_HasNonMovableAllocations;
4067  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
4068 
4069  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
4070  m_pBlock(VMA_NULL),
4071  m_HasNonMovableAllocations(true),
4072  m_Allocations(pAllocationCallbacks),
4073  m_pMappedDataForDefragmentation(VMA_NULL)
4074  {
4075  }
4076 
4077  void CalcHasNonMovableAllocations()
4078  {
4079  const size_t blockAllocCount = m_pBlock->m_Metadata.GetAllocationCount();
4080  const size_t defragmentAllocCount = m_Allocations.size();
4081  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
4082  }
4083 
4084  void SortAllocationsBySizeDescecnding()
4085  {
4086  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
4087  }
4088 
4089  VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
4090  void Unmap(VmaAllocator hAllocator);
4091 
4092  private:
4093  // Not null if mapped for defragmentation only, not originally mapped.
4094  void* m_pMappedDataForDefragmentation;
4095  };
4096 
4097  struct BlockPointerLess
4098  {
4099  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
4100  {
4101  return pLhsBlockInfo->m_pBlock < pRhsBlock;
4102  }
4103  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
4104  {
4105  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
4106  }
4107  };
4108 
4109  // 1. Blocks with some non-movable allocations go first.
4110  // 2. Blocks with smaller sumFreeSize go first.
4111  struct BlockInfoCompareMoveDestination
4112  {
4113  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
4114  {
4115  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
4116  {
4117  return true;
4118  }
4119  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
4120  {
4121  return false;
4122  }
4123  if(pLhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize())
4124  {
4125  return true;
4126  }
4127  return false;
4128  }
4129  };
4130 
4131  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
4132  BlockInfoVector m_Blocks;
4133 
4134  VkResult DefragmentRound(
4135  VkDeviceSize maxBytesToMove,
4136  uint32_t maxAllocationsToMove);
4137 
4138  static bool MoveMakesSense(
4139  size_t dstBlockIndex, VkDeviceSize dstOffset,
4140  size_t srcBlockIndex, VkDeviceSize srcOffset);
4141 
4142 public:
4143  VmaDefragmentator(
4144  VmaAllocator hAllocator,
4145  VmaBlockVector* pBlockVector,
4146  uint32_t currentFrameIndex);
4147 
4148  ~VmaDefragmentator();
4149 
4150  VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
4151  uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
4152 
4153  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
4154 
4155  VkResult Defragment(
4156  VkDeviceSize maxBytesToMove,
4157  uint32_t maxAllocationsToMove);
4158 };
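// Minimal sketch of the intended call sequence (illustrative only; allocCount,
// pAllocations and pAllocationsChanged are hypothetical, and error handling is
// omitted):
//
//   VmaDefragmentator* pDefrag =
//       blockVector.EnsureDefragmentator(hAllocator, currentFrameIndex);
//   for(size_t i = 0; i < allocCount; ++i)
//       pDefrag->AddAllocation(pAllocations[i], &pAllocationsChanged[i]);
//   VkResult res = pDefrag->Defragment(maxBytesToMove, maxAllocationsToMove);
//   VkDeviceSize moved = pDefrag->GetBytesMoved();
//   blockVector.DestroyDefragmentator();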
4159 
4160 // Main allocator object.
4161 struct VmaAllocator_T
4162 {
4163  bool m_UseMutex;
4164  bool m_UseKhrDedicatedAllocation;
4165  VkDevice m_hDevice;
4166  bool m_AllocationCallbacksSpecified;
4167  VkAllocationCallbacks m_AllocationCallbacks;
4168  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
4169 
4170  // Number of bytes free out of the limit, or VK_WHOLE_SIZE if there is no limit for that heap.
4171  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
4172  VMA_MUTEX m_HeapSizeLimitMutex;
4173 
4174  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
4175  VkPhysicalDeviceMemoryProperties m_MemProps;
4176 
4177  // Default pools.
4178  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
4179 
4180  // Each vector is sorted by memory (handle value).
4181  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
4182  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
4183  VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
4184 
4185  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
4186  ~VmaAllocator_T();
4187 
4188  const VkAllocationCallbacks* GetAllocationCallbacks() const
4189  {
4190  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
4191  }
4192  const VmaVulkanFunctions& GetVulkanFunctions() const
4193  {
4194  return m_VulkanFunctions;
4195  }
4196 
4197  VkDeviceSize GetBufferImageGranularity() const
4198  {
4199  return VMA_MAX(
4200  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
4201  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
4202  }
4203 
4204  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
4205  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
4206 
4207  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
4208  {
4209  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
4210  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
4211  }
4212 
4213  void GetBufferMemoryRequirements(
4214  VkBuffer hBuffer,
4215  VkMemoryRequirements& memReq,
4216  bool& requiresDedicatedAllocation,
4217  bool& prefersDedicatedAllocation) const;
4218  void GetImageMemoryRequirements(
4219  VkImage hImage,
4220  VkMemoryRequirements& memReq,
4221  bool& requiresDedicatedAllocation,
4222  bool& prefersDedicatedAllocation) const;
4223 
4224  // Main allocation function.
4225  VkResult AllocateMemory(
4226  const VkMemoryRequirements& vkMemReq,
4227  bool requiresDedicatedAllocation,
4228  bool prefersDedicatedAllocation,
4229  VkBuffer dedicatedBuffer,
4230  VkImage dedicatedImage,
4231  const VmaAllocationCreateInfo& createInfo,
4232  VmaSuballocationType suballocType,
4233  VmaAllocation* pAllocation);
4234 
4235  // Main deallocation function.
4236  void FreeMemory(const VmaAllocation allocation);
4237 
4238  void CalculateStats(VmaStats* pStats);
4239 
4240 #if VMA_STATS_STRING_ENABLED
4241  void PrintDetailedMap(class VmaJsonWriter& json);
4242 #endif
4243 
4244  VkResult Defragment(
4245  VmaAllocation* pAllocations,
4246  size_t allocationCount,
4247  VkBool32* pAllocationsChanged,
4248  const VmaDefragmentationInfo* pDefragmentationInfo,
4249  VmaDefragmentationStats* pDefragmentationStats);
4250 
4251  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
4252  bool TouchAllocation(VmaAllocation hAllocation);
4253 
4254  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
4255  void DestroyPool(VmaPool pool);
4256  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
4257 
4258  void SetCurrentFrameIndex(uint32_t frameIndex);
4259 
4260  void MakePoolAllocationsLost(
4261  VmaPool hPool,
4262  size_t* pLostAllocationCount);
4263 
4264  void CreateLostAllocation(VmaAllocation* pAllocation);
4265 
4266  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
4267  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
4268 
4269  VkResult Map(VmaAllocation hAllocation, void** ppData);
4270  void Unmap(VmaAllocation hAllocation);
4271 
4272 private:
4273  VkDeviceSize m_PreferredLargeHeapBlockSize;
4274 
4275  VkPhysicalDevice m_PhysicalDevice;
4276  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
4277 
4278  VMA_MUTEX m_PoolsMutex;
4279  // Protected by m_PoolsMutex. Sorted by pointer value.
4280  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
4281 
4282  VmaVulkanFunctions m_VulkanFunctions;
4283 
4284  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
4285 
4286  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
4287 
4288  VkResult AllocateMemoryOfType(
4289  const VkMemoryRequirements& vkMemReq,
4290  bool dedicatedAllocation,
4291  VkBuffer dedicatedBuffer,
4292  VkImage dedicatedImage,
4293  const VmaAllocationCreateInfo& createInfo,
4294  uint32_t memTypeIndex,
4295  VmaSuballocationType suballocType,
4296  VmaAllocation* pAllocation);
4297 
4298  // Allocates and registers new VkDeviceMemory specifically for single allocation.
4299  VkResult AllocateDedicatedMemory(
4300  VkDeviceSize size,
4301  VmaSuballocationType suballocType,
4302  uint32_t memTypeIndex,
4303  bool map,
4304  bool isUserDataString,
4305  void* pUserData,
4306  VkBuffer dedicatedBuffer,
4307  VkImage dedicatedImage,
4308  VmaAllocation* pAllocation);
4309 
4310  // Frees given allocation made as dedicated memory and releases its VkDeviceMemory.
4311  void FreeDedicatedMemory(VmaAllocation allocation);
4312 };
4313 
4314 ////////////////////////////////////////////////////////////////////////////////
4315 // Memory allocation #2 after VmaAllocator_T definition
4316 
4317 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
4318 {
4319  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
4320 }
4321 
4322 static void VmaFree(VmaAllocator hAllocator, void* ptr)
4323 {
4324  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
4325 }
4326 
4327 template<typename T>
4328 static T* VmaAllocate(VmaAllocator hAllocator)
4329 {
4330  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
4331 }
4332 
4333 template<typename T>
4334 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
4335 {
4336  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
4337 }
4338 
4339 template<typename T>
4340 static void vma_delete(VmaAllocator hAllocator, T* ptr)
4341 {
4342  if(ptr != VMA_NULL)
4343  {
4344  ptr->~T();
4345  VmaFree(hAllocator, ptr);
4346  }
4347 }
4348 
4349 template<typename T>
4350 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
4351 {
4352  if(ptr != VMA_NULL)
4353  {
4354  for(size_t i = count; i--; )
4355  ptr[i].~T();
4356  VmaFree(hAllocator, ptr);
4357  }
4358 }
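// Usage sketch for the helpers above (illustrative; MyType is hypothetical):
//
//   MyType* p = new(VmaAllocate<MyType>(hAllocator)) MyType();
//   ...
//   vma_delete(hAllocator, p); // Calls ~MyType() and frees the storage.
//
// Note that VmaAllocate() returns raw storage only; the caller constructs the
// object, e.g. with placement new as shown.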
4359 
4360 ////////////////////////////////////////////////////////////////////////////////
4361 // VmaStringBuilder
4362 
4363 #if VMA_STATS_STRING_ENABLED
4364 
4365 class VmaStringBuilder
4366 {
4367 public:
4368  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
4369  size_t GetLength() const { return m_Data.size(); }
4370  const char* GetData() const { return m_Data.data(); }
4371 
4372  void Add(char ch) { m_Data.push_back(ch); }
4373  void Add(const char* pStr);
4374  void AddNewLine() { Add('\n'); }
4375  void AddNumber(uint32_t num);
4376  void AddNumber(uint64_t num);
4377  void AddPointer(const void* ptr);
4378 
4379 private:
4380  VmaVector< char, VmaStlAllocator<char> > m_Data;
4381 };
4382 
4383 void VmaStringBuilder::Add(const char* pStr)
4384 {
4385  const size_t strLen = strlen(pStr);
4386  if(strLen > 0)
4387  {
4388  const size_t oldCount = m_Data.size();
4389  m_Data.resize(oldCount + strLen);
4390  memcpy(m_Data.data() + oldCount, pStr, strLen);
4391  }
4392 }
4393 
4394 void VmaStringBuilder::AddNumber(uint32_t num)
4395 {
4396  char buf[11];
4397  VmaUint32ToStr(buf, sizeof(buf), num);
4398  Add(buf);
4399 }
4400 
4401 void VmaStringBuilder::AddNumber(uint64_t num)
4402 {
4403  char buf[21];
4404  VmaUint64ToStr(buf, sizeof(buf), num);
4405  Add(buf);
4406 }
4407 
4408 void VmaStringBuilder::AddPointer(const void* ptr)
4409 {
4410  char buf[21];
4411  VmaPtrToStr(buf, sizeof(buf), ptr);
4412  Add(buf);
4413 }
4414 
4415 #endif // #if VMA_STATS_STRING_ENABLED
4416 
4417 ////////////////////////////////////////////////////////////////////////////////
4418 // VmaJsonWriter
4419 
4420 #if VMA_STATS_STRING_ENABLED
4421 
4422 class VmaJsonWriter
4423 {
4424 public:
4425  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
4426  ~VmaJsonWriter();
4427 
4428  void BeginObject(bool singleLine = false);
4429  void EndObject();
4430 
4431  void BeginArray(bool singleLine = false);
4432  void EndArray();
4433 
4434  void WriteString(const char* pStr);
4435  void BeginString(const char* pStr = VMA_NULL);
4436  void ContinueString(const char* pStr);
4437  void ContinueString(uint32_t n);
4438  void ContinueString(uint64_t n);
4439  void ContinueString_Pointer(const void* ptr);
4440  void EndString(const char* pStr = VMA_NULL);
4441 
4442  void WriteNumber(uint32_t n);
4443  void WriteNumber(uint64_t n);
4444  void WriteBool(bool b);
4445  void WriteNull();
4446 
4447 private:
4448  static const char* const INDENT;
4449 
4450  enum COLLECTION_TYPE
4451  {
4452  COLLECTION_TYPE_OBJECT,
4453  COLLECTION_TYPE_ARRAY,
4454  };
4455  struct StackItem
4456  {
4457  COLLECTION_TYPE type;
4458  uint32_t valueCount;
4459  bool singleLineMode;
4460  };
4461 
4462  VmaStringBuilder& m_SB;
4463  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
4464  bool m_InsideString;
4465 
4466  void BeginValue(bool isString);
4467  void WriteIndent(bool oneLess = false);
4468 };
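// Usage sketch (illustrative only): inside an object, writes must alternate
// string key / value, which BeginValue() below asserts.
//
//   VmaStringBuilder sb(hAllocator);
//   {
//       VmaJsonWriter json(hAllocator->GetAllocationCallbacks(), sb);
//       json.BeginObject();
//       json.WriteString("Size"); // Key - must be a string.
//       json.WriteNumber(1024u);  // Value.
//       json.EndObject();
//   }
//   // sb.GetData() now holds {"Size": 1024}, formatted across three lines.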
4469 
4470 const char* const VmaJsonWriter::INDENT = "  ";
4471 
4472 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
4473  m_SB(sb),
4474  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
4475  m_InsideString(false)
4476 {
4477 }
4478 
4479 VmaJsonWriter::~VmaJsonWriter()
4480 {
4481  VMA_ASSERT(!m_InsideString);
4482  VMA_ASSERT(m_Stack.empty());
4483 }
4484 
4485 void VmaJsonWriter::BeginObject(bool singleLine)
4486 {
4487  VMA_ASSERT(!m_InsideString);
4488 
4489  BeginValue(false);
4490  m_SB.Add('{');
4491 
4492  StackItem item;
4493  item.type = COLLECTION_TYPE_OBJECT;
4494  item.valueCount = 0;
4495  item.singleLineMode = singleLine;
4496  m_Stack.push_back(item);
4497 }
4498 
4499 void VmaJsonWriter::EndObject()
4500 {
4501  VMA_ASSERT(!m_InsideString);
4502 
4503  WriteIndent(true);
4504  m_SB.Add('}');
4505 
4506  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
4507  m_Stack.pop_back();
4508 }
4509 
4510 void VmaJsonWriter::BeginArray(bool singleLine)
4511 {
4512  VMA_ASSERT(!m_InsideString);
4513 
4514  BeginValue(false);
4515  m_SB.Add('[');
4516 
4517  StackItem item;
4518  item.type = COLLECTION_TYPE_ARRAY;
4519  item.valueCount = 0;
4520  item.singleLineMode = singleLine;
4521  m_Stack.push_back(item);
4522 }
4523 
4524 void VmaJsonWriter::EndArray()
4525 {
4526  VMA_ASSERT(!m_InsideString);
4527 
4528  WriteIndent(true);
4529  m_SB.Add(']');
4530 
4531  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
4532  m_Stack.pop_back();
4533 }
4534 
4535 void VmaJsonWriter::WriteString(const char* pStr)
4536 {
4537  BeginString(pStr);
4538  EndString();
4539 }
4540 
4541 void VmaJsonWriter::BeginString(const char* pStr)
4542 {
4543  VMA_ASSERT(!m_InsideString);
4544 
4545  BeginValue(true);
4546  m_SB.Add('"');
4547  m_InsideString = true;
4548  if(pStr != VMA_NULL && pStr[0] != '\0')
4549  {
4550  ContinueString(pStr);
4551  }
4552 }
4553 
4554 void VmaJsonWriter::ContinueString(const char* pStr)
4555 {
4556  VMA_ASSERT(m_InsideString);
4557 
4558  const size_t strLen = strlen(pStr);
4559  for(size_t i = 0; i < strLen; ++i)
4560  {
4561  char ch = pStr[i];
4562  if(ch == '\\')
4563  {
4564  m_SB.Add("\\\\");
4565  }
4566  else if(ch == '"')
4567  {
4568  m_SB.Add("\\\"");
4569  }
4570  else if(ch >= 32)
4571  {
4572  m_SB.Add(ch);
4573  }
4574  else switch(ch)
4575  {
4576  case '\b':
4577  m_SB.Add("\\b");
4578  break;
4579  case '\f':
4580  m_SB.Add("\\f");
4581  break;
4582  case '\n':
4583  m_SB.Add("\\n");
4584  break;
4585  case '\r':
4586  m_SB.Add("\\r");
4587  break;
4588  case '\t':
4589  m_SB.Add("\\t");
4590  break;
4591  default:
4592  VMA_ASSERT(0 && "Character not currently supported.");
4593  break;
4594  }
4595  }
4596 }
4597 
4598 void VmaJsonWriter::ContinueString(uint32_t n)
4599 {
4600  VMA_ASSERT(m_InsideString);
4601  m_SB.AddNumber(n);
4602 }
4603 
4604 void VmaJsonWriter::ContinueString(uint64_t n)
4605 {
4606  VMA_ASSERT(m_InsideString);
4607  m_SB.AddNumber(n);
4608 }
4609 
4610 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
4611 {
4612  VMA_ASSERT(m_InsideString);
4613  m_SB.AddPointer(ptr);
4614 }
4615 
4616 void VmaJsonWriter::EndString(const char* pStr)
4617 {
4618  VMA_ASSERT(m_InsideString);
4619  if(pStr != VMA_NULL && pStr[0] != '\0')
4620  {
4621  ContinueString(pStr);
4622  }
4623  m_SB.Add('"');
4624  m_InsideString = false;
4625 }
4626 
4627 void VmaJsonWriter::WriteNumber(uint32_t n)
4628 {
4629  VMA_ASSERT(!m_InsideString);
4630  BeginValue(false);
4631  m_SB.AddNumber(n);
4632 }
4633 
4634 void VmaJsonWriter::WriteNumber(uint64_t n)
4635 {
4636  VMA_ASSERT(!m_InsideString);
4637  BeginValue(false);
4638  m_SB.AddNumber(n);
4639 }
4640 
4641 void VmaJsonWriter::WriteBool(bool b)
4642 {
4643  VMA_ASSERT(!m_InsideString);
4644  BeginValue(false);
4645  m_SB.Add(b ? "true" : "false");
4646 }
4647 
4648 void VmaJsonWriter::WriteNull()
4649 {
4650  VMA_ASSERT(!m_InsideString);
4651  BeginValue(false);
4652  m_SB.Add("null");
4653 }
4654 
4655 void VmaJsonWriter::BeginValue(bool isString)
4656 {
4657  if(!m_Stack.empty())
4658  {
4659  StackItem& currItem = m_Stack.back();
4660  if(currItem.type == COLLECTION_TYPE_OBJECT &&
4661  currItem.valueCount % 2 == 0)
4662  {
4663  VMA_ASSERT(isString);
4664  }
4665 
4666  if(currItem.type == COLLECTION_TYPE_OBJECT &&
4667  currItem.valueCount % 2 != 0)
4668  {
4669  m_SB.Add(": ");
4670  }
4671  else if(currItem.valueCount > 0)
4672  {
4673  m_SB.Add(", ");
4674  WriteIndent();
4675  }
4676  else
4677  {
4678  WriteIndent();
4679  }
4680  ++currItem.valueCount;
4681  }
4682 }
4683 
4684 void VmaJsonWriter::WriteIndent(bool oneLess)
4685 {
4686  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
4687  {
4688  m_SB.AddNewLine();
4689 
4690  size_t count = m_Stack.size();
4691  if(count > 0 && oneLess)
4692  {
4693  --count;
4694  }
4695  for(size_t i = 0; i < count; ++i)
4696  {
4697  m_SB.Add(INDENT);
4698  }
4699  }
4700 }
4701 
4702 #endif // #if VMA_STATS_STRING_ENABLED
4703 
4704 ////////////////////////////////////////////////////////////////////////////////
4705 
4706 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
4707 {
4708  if(IsUserDataString())
4709  {
4710  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
4711 
4712  FreeUserDataString(hAllocator);
4713 
4714  if(pUserData != VMA_NULL)
4715  {
4716  const char* const newStrSrc = (char*)pUserData;
4717  const size_t newStrLen = strlen(newStrSrc);
4718  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
4719  memcpy(newStrDst, newStrSrc, newStrLen + 1);
4720  m_pUserData = newStrDst;
4721  }
4722  }
4723  else
4724  {
4725  m_pUserData = pUserData;
4726  }
4727 }
4728 
4729 void VmaAllocation_T::ChangeBlockAllocation(
4730  VmaAllocator hAllocator,
4731  VmaDeviceMemoryBlock* block,
4732  VkDeviceSize offset)
4733 {
4734  VMA_ASSERT(block != VMA_NULL);
4735  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
4736 
4737  // Move mapping reference counter from old block to new block.
4738  if(block != m_BlockAllocation.m_Block)
4739  {
4740  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
4741  if(IsPersistentMap())
4742  ++mapRefCount;
4743  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
4744  block->Map(hAllocator, mapRefCount, VMA_NULL);
4745  }
4746 
4747  m_BlockAllocation.m_Block = block;
4748  m_BlockAllocation.m_Offset = offset;
4749 }
4750 
4751 VkDeviceSize VmaAllocation_T::GetOffset() const
4752 {
4753  switch(m_Type)
4754  {
4755  case ALLOCATION_TYPE_BLOCK:
4756  return m_BlockAllocation.m_Offset;
4757  case ALLOCATION_TYPE_DEDICATED:
4758  return 0;
4759  default:
4760  VMA_ASSERT(0);
4761  return 0;
4762  }
4763 }
4764 
4765 VkDeviceMemory VmaAllocation_T::GetMemory() const
4766 {
4767  switch(m_Type)
4768  {
4769  case ALLOCATION_TYPE_BLOCK:
4770  return m_BlockAllocation.m_Block->m_hMemory;
4771  case ALLOCATION_TYPE_DEDICATED:
4772  return m_DedicatedAllocation.m_hMemory;
4773  default:
4774  VMA_ASSERT(0);
4775  return VK_NULL_HANDLE;
4776  }
4777 }
4778 
4779 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
4780 {
4781  switch(m_Type)
4782  {
4783  case ALLOCATION_TYPE_BLOCK:
4784  return m_BlockAllocation.m_Block->m_MemoryTypeIndex;
4785  case ALLOCATION_TYPE_DEDICATED:
4786  return m_DedicatedAllocation.m_MemoryTypeIndex;
4787  default:
4788  VMA_ASSERT(0);
4789  return UINT32_MAX;
4790  }
4791 }
4792 
4793 void* VmaAllocation_T::GetMappedData() const
4794 {
4795  switch(m_Type)
4796  {
4797  case ALLOCATION_TYPE_BLOCK:
4798  if(m_MapCount != 0)
4799  {
4800  void* pBlockData = m_BlockAllocation.m_Block->m_Mapping.GetMappedData();
4801  VMA_ASSERT(pBlockData != VMA_NULL);
4802  return (char*)pBlockData + m_BlockAllocation.m_Offset;
4803  }
4804  else
4805  {
4806  return VMA_NULL;
4807  }
4808  break;
4809  case ALLOCATION_TYPE_DEDICATED:
4810  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
4811  return m_DedicatedAllocation.m_pMappedData;
4812  default:
4813  VMA_ASSERT(0);
4814  return VMA_NULL;
4815  }
4816 }
4817 
4818 bool VmaAllocation_T::CanBecomeLost() const
4819 {
4820  switch(m_Type)
4821  {
4822  case ALLOCATION_TYPE_BLOCK:
4823  return m_BlockAllocation.m_CanBecomeLost;
4824  case ALLOCATION_TYPE_DEDICATED:
4825  return false;
4826  default:
4827  VMA_ASSERT(0);
4828  return false;
4829  }
4830 }
4831 
4832 VmaPool VmaAllocation_T::GetPool() const
4833 {
4834  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
4835  return m_BlockAllocation.m_hPool;
4836 }
4837 
4838 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
4839 {
4840  VMA_ASSERT(CanBecomeLost());
4841 
4842  /*
4843  Warning: This is a carefully designed algorithm.
4844  Do not modify unless you really know what you're doing :)
4845  */
4846  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
4847  for(;;)
4848  {
4849  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
4850  {
4851  VMA_ASSERT(0);
4852  return false;
4853  }
4854  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
4855  {
4856  return false;
4857  }
4858  else // Last use time earlier than current time.
4859  {
4860  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
4861  {
4862  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
4863  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
4864  return true;
4865  }
4866  }
4867  }
4868 }
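// Illustrative scenario (hypothetical numbers): with frameInUseCount = 2, an
// allocation last used in frame 10 stays usable while currentFrameIndex <= 12
// (10 + 2 >= 12) and can first be made lost in frame 13. The CAS loop above
// simply retries if another thread updates LastUseFrameIndex concurrently.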
4869 
4870 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
4871 {
4872  VMA_ASSERT(IsUserDataString());
4873  if(m_pUserData != VMA_NULL)
4874  {
4875  char* const oldStr = (char*)m_pUserData;
4876  const size_t oldStrLen = strlen(oldStr);
4877  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
4878  m_pUserData = VMA_NULL;
4879  }
4880 }
4881 
4882 void VmaAllocation_T::BlockAllocMap()
4883 {
4884  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
4885 
4886  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
4887  {
4888  ++m_MapCount;
4889  }
4890  else
4891  {
4892  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
4893  }
4894 }
4895 
4896 void VmaAllocation_T::BlockAllocUnmap()
4897 {
4898  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
4899 
4900  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
4901  {
4902  --m_MapCount;
4903  }
4904  else
4905  {
4906  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
4907  }
4908 }
4909 
4910 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
4911 {
4912  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
4913 
4914  if(m_MapCount != 0)
4915  {
4916  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
4917  {
4918  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
4919  *ppData = m_DedicatedAllocation.m_pMappedData;
4920  ++m_MapCount;
4921  return VK_SUCCESS;
4922  }
4923  else
4924  {
4925  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
4926  return VK_ERROR_MEMORY_MAP_FAILED;
4927  }
4928  }
4929  else
4930  {
4931  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
4932  hAllocator->m_hDevice,
4933  m_DedicatedAllocation.m_hMemory,
4934  0, // offset
4935  VK_WHOLE_SIZE,
4936  0, // flags
4937  ppData);
4938  if(result == VK_SUCCESS)
4939  {
4940  m_DedicatedAllocation.m_pMappedData = *ppData;
4941  m_MapCount = 1;
4942  }
4943  return result;
4944  }
4945 }
4946 
4947 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
4948 {
4949  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
4950 
4951  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
4952  {
4953  --m_MapCount;
4954  if(m_MapCount == 0)
4955  {
4956  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
4957  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
4958  hAllocator->m_hDevice,
4959  m_DedicatedAllocation.m_hMemory);
4960  }
4961  }
4962  else
4963  {
4964  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
4965  }
4966 }
4967 
4968 #if VMA_STATS_STRING_ENABLED
4969 
4970 // Correspond to values of enum VmaSuballocationType.
4971 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
4972  "FREE",
4973  "UNKNOWN",
4974  "BUFFER",
4975  "IMAGE_UNKNOWN",
4976  "IMAGE_LINEAR",
4977  "IMAGE_OPTIMAL",
4978 };
4979 
4980 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
4981 {
4982  json.BeginObject();
4983 
4984  json.WriteString("Blocks");
4985  json.WriteNumber(stat.blockCount);
4986 
4987  json.WriteString("Allocations");
4988  json.WriteNumber(stat.allocationCount);
4989 
4990  json.WriteString("UnusedRanges");
4991  json.WriteNumber(stat.unusedRangeCount);
4992 
4993  json.WriteString("UsedBytes");
4994  json.WriteNumber(stat.usedBytes);
4995 
4996  json.WriteString("UnusedBytes");
4997  json.WriteNumber(stat.unusedBytes);
4998 
4999  if(stat.allocationCount > 1)
5000  {
5001  json.WriteString("AllocationSize");
5002  json.BeginObject(true);
5003  json.WriteString("Min");
5004  json.WriteNumber(stat.allocationSizeMin);
5005  json.WriteString("Avg");
5006  json.WriteNumber(stat.allocationSizeAvg);
5007  json.WriteString("Max");
5008  json.WriteNumber(stat.allocationSizeMax);
5009  json.EndObject();
5010  }
5011 
5012  if(stat.unusedRangeCount > 1)
5013  {
5014  json.WriteString("UnusedRangeSize");
5015  json.BeginObject(true);
5016  json.WriteString("Min");
5017  json.WriteNumber(stat.unusedRangeSizeMin);
5018  json.WriteString("Avg");
5019  json.WriteNumber(stat.unusedRangeSizeAvg);
5020  json.WriteString("Max");
5021  json.WriteNumber(stat.unusedRangeSizeMax);
5022  json.EndObject();
5023  }
5024 
5025  json.EndObject();
5026 }
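// Example output (illustrative, abbreviated formatting) for a VmaStatInfo
// describing a single 65536-byte allocation:
//
//   { "Blocks": 1, "Allocations": 1, "UnusedRanges": 0,
//     "UsedBytes": 65536, "UnusedBytes": 0 }
//
// The "AllocationSize" and "UnusedRangeSize" sub-objects are written only when
// allocationCount > 1 or unusedRangeCount > 1, respectively.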
5027 
5028 #endif // #if VMA_STATS_STRING_ENABLED
5029 
5030 struct VmaSuballocationItemSizeLess
5031 {
5032  bool operator()(
5033  const VmaSuballocationList::iterator lhs,
5034  const VmaSuballocationList::iterator rhs) const
5035  {
5036  return lhs->size < rhs->size;
5037  }
5038  bool operator()(
5039  const VmaSuballocationList::iterator lhs,
5040  VkDeviceSize rhsSize) const
5041  {
5042  return lhs->size < rhsSize;
5043  }
5044 };
5045 
5046 ////////////////////////////////////////////////////////////////////////////////
5047 // class VmaBlockMetadata
5048 
5049 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
5050  m_Size(0),
5051  m_FreeCount(0),
5052  m_SumFreeSize(0),
5053  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
5054  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
5055 {
5056 }
5057 
5058 VmaBlockMetadata::~VmaBlockMetadata()
5059 {
5060 }
5061 
5062 void VmaBlockMetadata::Init(VkDeviceSize size)
5063 {
5064  m_Size = size;
5065  m_FreeCount = 1;
5066  m_SumFreeSize = size;
5067 
5068  VmaSuballocation suballoc = {};
5069  suballoc.offset = 0;
5070  suballoc.size = size;
5071  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5072  suballoc.hAllocation = VK_NULL_HANDLE;
5073 
5074  m_Suballocations.push_back(suballoc);
5075  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
5076  --suballocItem;
5077  m_FreeSuballocationsBySize.push_back(suballocItem);
5078 }
5079 
5080 bool VmaBlockMetadata::Validate() const
5081 {
5082  if(m_Suballocations.empty())
5083  {
5084  return false;
5085  }
5086 
5087  // Expected offset of new suballocation as calculated from previous ones.
5088  VkDeviceSize calculatedOffset = 0;
5089  // Expected number of free suballocations as calculated from traversing their list.
5090  uint32_t calculatedFreeCount = 0;
5091  // Expected sum size of free suballocations as calculated from traversing their list.
5092  VkDeviceSize calculatedSumFreeSize = 0;
5093  // Expected number of free suballocations that should be registered in
5094  // m_FreeSuballocationsBySize calculated from traversing their list.
5095  size_t freeSuballocationsToRegister = 0;
5096  // True if previously visited suballocation was free.
5097  bool prevFree = false;
5098 
5099  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
5100  suballocItem != m_Suballocations.cend();
5101  ++suballocItem)
5102  {
5103  const VmaSuballocation& subAlloc = *suballocItem;
5104 
5105  // Actual offset of this suballocation doesn't match expected one.
5106  if(subAlloc.offset != calculatedOffset)
5107  {
5108  return false;
5109  }
5110 
5111  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
5112  // Two adjacent free suballocations are invalid. They should be merged.
5113  if(prevFree && currFree)
5114  {
5115  return false;
5116  }
5117 
5118  if(currFree != (subAlloc.hAllocation == VK_NULL_HANDLE))
5119  {
5120  return false;
5121  }
5122 
5123  if(currFree)
5124  {
5125  calculatedSumFreeSize += subAlloc.size;
5126  ++calculatedFreeCount;
5127  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5128  {
5129  ++freeSuballocationsToRegister;
5130  }
5131  }
5132  else
5133  {
5134  if(subAlloc.hAllocation->GetOffset() != subAlloc.offset)
5135  {
5136  return false;
5137  }
5138  if(subAlloc.hAllocation->GetSize() != subAlloc.size)
5139  {
5140  return false;
5141  }
5142  }
5143 
5144  calculatedOffset += subAlloc.size;
5145  prevFree = currFree;
5146  }
5147 
5148  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
5149  // match expected one.
5150  if(m_FreeSuballocationsBySize.size() != freeSuballocationsToRegister)
5151  {
5152  return false;
5153  }
5154 
5155  VkDeviceSize lastSize = 0;
5156  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
5157  {
5158  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
5159 
5160  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
5161  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
5162  {
5163  return false;
5164  }
5165  // They must be sorted by size ascending.
5166  if(suballocItem->size < lastSize)
5167  {
5168  return false;
5169  }
5170 
5171  lastSize = suballocItem->size;
5172  }
5173 
5174  // Check if totals match calculated values.
5175  if(!ValidateFreeSuballocationList() ||
5176  (calculatedOffset != m_Size) ||
5177  (calculatedSumFreeSize != m_SumFreeSize) ||
5178  (calculatedFreeCount != m_FreeCount))
5179  {
5180  return false;
5181  }
5182 
5183  return true;
5184 }
5185 
5186 VkDeviceSize VmaBlockMetadata::GetUnusedRangeSizeMax() const
5187 {
5188  if(!m_FreeSuballocationsBySize.empty())
5189  {
5190  return m_FreeSuballocationsBySize.back()->size;
5191  }
5192  else
5193  {
5194  return 0;
5195  }
5196 }
5197 
5198 bool VmaBlockMetadata::IsEmpty() const
5199 {
5200  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
5201 }
5202 
5203 void VmaBlockMetadata::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
5204 {
5205  outInfo.blockCount = 1;
5206 
5207  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
5208  outInfo.allocationCount = rangeCount - m_FreeCount;
5209  outInfo.unusedRangeCount = m_FreeCount;
5210 
5211  outInfo.unusedBytes = m_SumFreeSize;
5212  outInfo.usedBytes = m_Size - outInfo.unusedBytes;
5213 
5214  outInfo.allocationSizeMin = UINT64_MAX;
5215  outInfo.allocationSizeMax = 0;
5216  outInfo.unusedRangeSizeMin = UINT64_MAX;
5217  outInfo.unusedRangeSizeMax = 0;
5218 
5219  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
5220  suballocItem != m_Suballocations.cend();
5221  ++suballocItem)
5222  {
5223  const VmaSuballocation& suballoc = *suballocItem;
5224  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
5225  {
5226  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
5227  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
5228  }
5229  else
5230  {
5231  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
5232  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
5233  }
5234  }
5235 }
5236 
5237 void VmaBlockMetadata::AddPoolStats(VmaPoolStats& inoutStats) const
5238 {
5239  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
5240 
5241  inoutStats.size += m_Size;
5242  inoutStats.unusedSize += m_SumFreeSize;
5243  inoutStats.allocationCount += rangeCount - m_FreeCount;
5244  inoutStats.unusedRangeCount += m_FreeCount;
5245  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
5246 }
5247 
5248 #if VMA_STATS_STRING_ENABLED
5249 
5250 void VmaBlockMetadata::PrintDetailedMap(class VmaJsonWriter& json) const
5251 {
5252  json.BeginObject();
5253 
5254  json.WriteString("TotalBytes");
5255  json.WriteNumber(m_Size);
5256 
5257  json.WriteString("UnusedBytes");
5258  json.WriteNumber(m_SumFreeSize);
5259 
5260  json.WriteString("Allocations");
5261  json.WriteNumber((uint64_t)m_Suballocations.size() - m_FreeCount);
5262 
5263  json.WriteString("UnusedRanges");
5264  json.WriteNumber(m_FreeCount);
5265 
5266  json.WriteString("Suballocations");
5267  json.BeginArray();
5268  size_t i = 0;
5269  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
5270  suballocItem != m_Suballocations.cend();
5271  ++suballocItem, ++i)
5272  {
5273  json.BeginObject(true);
5274 
5275  json.WriteString("Type");
5276  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[suballocItem->type]);
5277 
5278  json.WriteString("Size");
5279  json.WriteNumber(suballocItem->size);
5280 
5281  json.WriteString("Offset");
5282  json.WriteNumber(suballocItem->offset);
5283 
5284  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
5285  {
5286  const void* pUserData = suballocItem->hAllocation->GetUserData();
5287  if(pUserData != VMA_NULL)
5288  {
5289  json.WriteString("UserData");
5290  if(suballocItem->hAllocation->IsUserDataString())
5291  {
5292  json.WriteString((const char*)pUserData);
5293  }
5294  else
5295  {
5296  json.BeginString();
5297  json.ContinueString_Pointer(pUserData);
5298  json.EndString();
5299  }
5300  }
5301  }
5302 
5303  json.EndObject();
5304  }
5305  json.EndArray();
5306 
5307  json.EndObject();
5308 }
5309 
5310 #endif // #if VMA_STATS_STRING_ENABLED
5311 
5312 /*
5313 How many suitable free suballocations to analyze before choosing the best one.
5314 - Set to 1 to use First-Fit algorithm - the first suitable free suballocation will
5315  be chosen.
5316 - Set to UINT32_MAX to use Best-Fit/Worst-Fit algorithm - all suitable free
5317  suballocations will be analyzed and the best one will be chosen.
5318 - Any other value is also acceptable.
5319 */
5320 //static const uint32_t MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK = 8;
5321 
5322 void VmaBlockMetadata::CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest)
5323 {
5324  VMA_ASSERT(IsEmpty());
5325  pAllocationRequest->offset = 0;
5326  pAllocationRequest->sumFreeSize = m_SumFreeSize;
5327  pAllocationRequest->sumItemSize = 0;
5328  pAllocationRequest->item = m_Suballocations.begin();
5329  pAllocationRequest->itemsToMakeLostCount = 0;
5330 }
5331 
5332 bool VmaBlockMetadata::CreateAllocationRequest(
5333  uint32_t currentFrameIndex,
5334  uint32_t frameInUseCount,
5335  VkDeviceSize bufferImageGranularity,
5336  VkDeviceSize allocSize,
5337  VkDeviceSize allocAlignment,
5338  VmaSuballocationType allocType,
5339  bool canMakeOtherLost,
5340  VmaAllocationRequest* pAllocationRequest)
5341 {
5342  VMA_ASSERT(allocSize > 0);
5343  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
5344  VMA_ASSERT(pAllocationRequest != VMA_NULL);
5345  VMA_HEAVY_ASSERT(Validate());
5346 
5347  // There is not enough total free space in this block to fulfill the request: Early return.
5348  if(canMakeOtherLost == false && m_SumFreeSize < allocSize)
5349  {
5350  return false;
5351  }
5352 
5353  // New algorithm, efficiently searching m_FreeSuballocationsBySize.
5354  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
5355  if(freeSuballocCount > 0)
5356  {
5357  if(VMA_BEST_FIT)
5358  {
5359  // Find first free suballocation with size not less than allocSize.
5360  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
5361  m_FreeSuballocationsBySize.data(),
5362  m_FreeSuballocationsBySize.data() + freeSuballocCount,
5363  allocSize,
5364  VmaSuballocationItemSizeLess());
5365  size_t index = it - m_FreeSuballocationsBySize.data();
5366  for(; index < freeSuballocCount; ++index)
5367  {
5368  if(CheckAllocation(
5369  currentFrameIndex,
5370  frameInUseCount,
5371  bufferImageGranularity,
5372  allocSize,
5373  allocAlignment,
5374  allocType,
5375  m_FreeSuballocationsBySize[index],
5376  false, // canMakeOtherLost
5377  &pAllocationRequest->offset,
5378  &pAllocationRequest->itemsToMakeLostCount,
5379  &pAllocationRequest->sumFreeSize,
5380  &pAllocationRequest->sumItemSize))
5381  {
5382  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
5383  return true;
5384  }
5385  }
5386  }
5387  else
5388  {
5389  // Search starting from the biggest suballocations.
5390  for(size_t index = freeSuballocCount; index--; )
5391  {
5392  if(CheckAllocation(
5393  currentFrameIndex,
5394  frameInUseCount,
5395  bufferImageGranularity,
5396  allocSize,
5397  allocAlignment,
5398  allocType,
5399  m_FreeSuballocationsBySize[index],
5400  false, // canMakeOtherLost
5401  &pAllocationRequest->offset,
5402  &pAllocationRequest->itemsToMakeLostCount,
5403  &pAllocationRequest->sumFreeSize,
5404  &pAllocationRequest->sumItemSize))
5405  {
5406  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
5407  return true;
5408  }
5409  }
5410  }
5411  }
5412 
5413  if(canMakeOtherLost)
5414  {
5415  // Brute-force algorithm. TODO: Come up with something better.
5416 
5417  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
5418  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
5419 
5420  VmaAllocationRequest tmpAllocRequest = {};
5421  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
5422  suballocIt != m_Suballocations.end();
5423  ++suballocIt)
5424  {
5425  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
5426  suballocIt->hAllocation->CanBecomeLost())
5427  {
5428  if(CheckAllocation(
5429  currentFrameIndex,
5430  frameInUseCount,
5431  bufferImageGranularity,
5432  allocSize,
5433  allocAlignment,
5434  allocType,
5435  suballocIt,
5436  canMakeOtherLost,
5437  &tmpAllocRequest.offset,
5438  &tmpAllocRequest.itemsToMakeLostCount,
5439  &tmpAllocRequest.sumFreeSize,
5440  &tmpAllocRequest.sumItemSize))
5441  {
5442  tmpAllocRequest.item = suballocIt;
5443 
5444  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
5445  {
5446  *pAllocationRequest = tmpAllocRequest;
5447  }
5448  }
5449  }
5450  }
5451 
5452  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
5453  {
5454  return true;
5455  }
5456  }
5457 
5458  return false;
5459 }
5460 
5461 bool VmaBlockMetadata::MakeRequestedAllocationsLost(
5462  uint32_t currentFrameIndex,
5463  uint32_t frameInUseCount,
5464  VmaAllocationRequest* pAllocationRequest)
5465 {
5466  while(pAllocationRequest->itemsToMakeLostCount > 0)
5467  {
5468  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
5469  {
5470  ++pAllocationRequest->item;
5471  }
5472  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
5473  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
5474  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
5475  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
5476  {
5477  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
5478  --pAllocationRequest->itemsToMakeLostCount;
5479  }
5480  else
5481  {
5482  return false;
5483  }
5484  }
5485 
5486  VMA_HEAVY_ASSERT(Validate());
5487  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
5488  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
5489 
5490  return true;
5491 }
5492 
5493 uint32_t VmaBlockMetadata::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
5494 {
5495  uint32_t lostAllocationCount = 0;
5496  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
5497  it != m_Suballocations.end();
5498  ++it)
5499  {
5500  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
5501  it->hAllocation->CanBecomeLost() &&
5502  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
5503  {
5504  it = FreeSuballocation(it);
5505  ++lostAllocationCount;
5506  }
5507  }
5508  return lostAllocationCount;
5509 }
5510 
5511 void VmaBlockMetadata::Alloc(
5512  const VmaAllocationRequest& request,
5513  VmaSuballocationType type,
5514  VkDeviceSize allocSize,
5515  VmaAllocation hAllocation)
5516 {
5517  VMA_ASSERT(request.item != m_Suballocations.end());
5518  VmaSuballocation& suballoc = *request.item;
5519  // Given suballocation is a free block.
5520  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
5521  // Given offset is inside this suballocation.
5522  VMA_ASSERT(request.offset >= suballoc.offset);
5523  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
5524  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
5525  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
5526 
5527  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
5528  // it to become used.
5529  UnregisterFreeSuballocation(request.item);
5530 
5531  suballoc.offset = request.offset;
5532  suballoc.size = allocSize;
5533  suballoc.type = type;
5534  suballoc.hAllocation = hAllocation;
5535 
5536  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
5537  if(paddingEnd)
5538  {
5539  VmaSuballocation paddingSuballoc = {};
5540  paddingSuballoc.offset = request.offset + allocSize;
5541  paddingSuballoc.size = paddingEnd;
5542  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5543  VmaSuballocationList::iterator next = request.item;
5544  ++next;
5545  const VmaSuballocationList::iterator paddingEndItem =
5546  m_Suballocations.insert(next, paddingSuballoc);
5547  RegisterFreeSuballocation(paddingEndItem);
5548  }
5549 
5550  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
5551  if(paddingBegin)
5552  {
5553  VmaSuballocation paddingSuballoc = {};
5554  paddingSuballoc.offset = request.offset - paddingBegin;
5555  paddingSuballoc.size = paddingBegin;
5556  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5557  const VmaSuballocationList::iterator paddingBeginItem =
5558  m_Suballocations.insert(request.item, paddingSuballoc);
5559  RegisterFreeSuballocation(paddingBeginItem);
5560  }
5561 
5562  // Update totals.
5563  m_FreeCount = m_FreeCount - 1;
5564  if(paddingBegin > 0)
5565  {
5566  ++m_FreeCount;
5567  }
5568  if(paddingEnd > 0)
5569  {
5570  ++m_FreeCount;
5571  }
5572  m_SumFreeSize -= allocSize;
5573 }
5574 
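// A minimal, self-contained sketch (not part of vk_mem_alloc.h) of the
// padding arithmetic that Alloc() above performs, reduced to plain integers.
// SplitFreeRange and FreeRangeSplit are hypothetical names.
#include <cassert>
#include <cstdint>

struct FreeRangeSplit { uint64_t paddingBegin, paddingEnd; };

// Splits a free range [freeOffset, freeOffset + freeSize) around an
// allocation placed at allocOffset with size allocSize.
static FreeRangeSplit SplitFreeRange(
    uint64_t freeOffset, uint64_t freeSize,
    uint64_t allocOffset, uint64_t allocSize)
{
    assert(allocOffset >= freeOffset);
    const uint64_t paddingBegin = allocOffset - freeOffset;
    assert(freeSize >= paddingBegin + allocSize);
    return { paddingBegin, freeSize - paddingBegin - allocSize };
}

// Example: a 1024-byte free range at offset 0 with a 256-byte allocation
// aligned up to offset 64 yields paddingBegin = 64 and paddingEnd = 704,
// so m_FreeCount changes by -1 + 1 + 1 = +1, matching the totals update above.
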
5575 void VmaBlockMetadata::Free(const VmaAllocation allocation)
5576 {
5577  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
5578  suballocItem != m_Suballocations.end();
5579  ++suballocItem)
5580  {
5581  VmaSuballocation& suballoc = *suballocItem;
5582  if(suballoc.hAllocation == allocation)
5583  {
5584  FreeSuballocation(suballocItem);
5585  VMA_HEAVY_ASSERT(Validate());
5586  return;
5587  }
5588  }
5589  VMA_ASSERT(0 && "Not found!");
5590 }
5591 
5592 void VmaBlockMetadata::FreeAtOffset(VkDeviceSize offset)
5593 {
5594  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
5595  suballocItem != m_Suballocations.end();
5596  ++suballocItem)
5597  {
5598  VmaSuballocation& suballoc = *suballocItem;
5599  if(suballoc.offset == offset)
5600  {
5601  FreeSuballocation(suballocItem);
5602  return;
5603  }
5604  }
5605  VMA_ASSERT(0 && "Not found!");
5606 }
5607 
5608 bool VmaBlockMetadata::ValidateFreeSuballocationList() const
5609 {
5610  VkDeviceSize lastSize = 0;
5611  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
5612  {
5613  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
5614 
5615  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
5616  {
5617  VMA_ASSERT(0);
5618  return false;
5619  }
5620  if(it->size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5621  {
5622  VMA_ASSERT(0);
5623  return false;
5624  }
5625  if(it->size < lastSize)
5626  {
5627  VMA_ASSERT(0);
5628  return false;
5629  }
5630 
5631  lastSize = it->size;
5632  }
5633  return true;
5634 }
5635 
5636 bool VmaBlockMetadata::CheckAllocation(
5637  uint32_t currentFrameIndex,
5638  uint32_t frameInUseCount,
5639  VkDeviceSize bufferImageGranularity,
5640  VkDeviceSize allocSize,
5641  VkDeviceSize allocAlignment,
5642  VmaSuballocationType allocType,
5643  VmaSuballocationList::const_iterator suballocItem,
5644  bool canMakeOtherLost,
5645  VkDeviceSize* pOffset,
5646  size_t* itemsToMakeLostCount,
5647  VkDeviceSize* pSumFreeSize,
5648  VkDeviceSize* pSumItemSize) const
5649 {
5650  VMA_ASSERT(allocSize > 0);
5651  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
5652  VMA_ASSERT(suballocItem != m_Suballocations.cend());
5653  VMA_ASSERT(pOffset != VMA_NULL);
5654 
5655  *itemsToMakeLostCount = 0;
5656  *pSumFreeSize = 0;
5657  *pSumItemSize = 0;
5658 
5659  if(canMakeOtherLost)
5660  {
5661  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
5662  {
5663  *pSumFreeSize = suballocItem->size;
5664  }
5665  else
5666  {
5667  if(suballocItem->hAllocation->CanBecomeLost() &&
5668  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
5669  {
5670  ++*itemsToMakeLostCount;
5671  *pSumItemSize = suballocItem->size;
5672  }
5673  else
5674  {
5675  return false;
5676  }
5677  }
5678 
5679  // Remaining size is too small for this request: Early return.
5680  if(m_Size - suballocItem->offset < allocSize)
5681  {
5682  return false;
5683  }
5684 
5685  // Start from offset equal to beginning of this suballocation.
5686  *pOffset = suballocItem->offset;
5687 
5688  // Apply VMA_DEBUG_MARGIN at the beginning.
5689  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
5690  {
5691  *pOffset += VMA_DEBUG_MARGIN;
5692  }
5693 
5694  // Apply alignment.
5695  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
5696  *pOffset = VmaAlignUp(*pOffset, alignment);
5697 
5698  // Check previous suballocations for BufferImageGranularity conflicts.
5699  // Make bigger alignment if necessary.
5700  if(bufferImageGranularity > 1)
5701  {
5702  bool bufferImageGranularityConflict = false;
5703  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
5704  while(prevSuballocItem != m_Suballocations.cbegin())
5705  {
5706  --prevSuballocItem;
5707  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
5708  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
5709  {
5710  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
5711  {
5712  bufferImageGranularityConflict = true;
5713  break;
5714  }
5715  }
5716  else
5717  // Already on previous page.
5718  break;
5719  }
5720  if(bufferImageGranularityConflict)
5721  {
5722  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
5723  }
5724  }
5725 
5726  // Now that we have final *pOffset, check if we are past suballocItem.
5727  // If yes, return false - this function should be called for another suballocItem as starting point.
5728  if(*pOffset >= suballocItem->offset + suballocItem->size)
5729  {
5730  return false;
5731  }
5732 
5733  // Calculate padding at the beginning based on current offset.
5734  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
5735 
5736  // Calculate required margin at the end if this is not last suballocation.
5737  VmaSuballocationList::const_iterator next = suballocItem;
5738  ++next;
5739  const VkDeviceSize requiredEndMargin =
5740  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
5741 
5742  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
5743  // Another early return check.
5744  if(suballocItem->offset + totalSize > m_Size)
5745  {
5746  return false;
5747  }
5748 
5749  // Advance lastSuballocItem until desired size is reached.
5750  // Update itemsToMakeLostCount.
5751  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
5752  if(totalSize > suballocItem->size)
5753  {
5754  VkDeviceSize remainingSize = totalSize - suballocItem->size;
5755  while(remainingSize > 0)
5756  {
5757  ++lastSuballocItem;
5758  if(lastSuballocItem == m_Suballocations.cend())
5759  {
5760  return false;
5761  }
5762  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
5763  {
5764  *pSumFreeSize += lastSuballocItem->size;
5765  }
5766  else
5767  {
5768  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
5769  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
5770  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
5771  {
5772  ++*itemsToMakeLostCount;
5773  *pSumItemSize += lastSuballocItem->size;
5774  }
5775  else
5776  {
5777  return false;
5778  }
5779  }
5780  remainingSize = (lastSuballocItem->size < remainingSize) ?
5781  remainingSize - lastSuballocItem->size : 0;
5782  }
5783  }
5784 
5785  // Check next suballocations for BufferImageGranularity conflicts.
5786  // If conflict exists, we must mark more allocations lost or fail.
5787  if(bufferImageGranularity > 1)
5788  {
5789  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
5790  ++nextSuballocItem;
5791  while(nextSuballocItem != m_Suballocations.cend())
5792  {
5793  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
5794  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
5795  {
5796  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
5797  {
5798  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
5799  if(nextSuballoc.hAllocation->CanBecomeLost() &&
5800  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
5801  {
5802  ++*itemsToMakeLostCount;
5803  }
5804  else
5805  {
5806  return false;
5807  }
5808  }
5809  }
5810  else
5811  {
5812  // Already on next page.
5813  break;
5814  }
5815  ++nextSuballocItem;
5816  }
5817  }
5818  }
5819  else
5820  {
5821  const VmaSuballocation& suballoc = *suballocItem;
5822  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
5823 
5824  *pSumFreeSize = suballoc.size;
5825 
5826  // Size of this suballocation is too small for this request: Early return.
5827  if(suballoc.size < allocSize)
5828  {
5829  return false;
5830  }
5831 
5832  // Start from offset equal to beginning of this suballocation.
5833  *pOffset = suballoc.offset;
5834 
5835  // Apply VMA_DEBUG_MARGIN at the beginning.
5836  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
5837  {
5838  *pOffset += VMA_DEBUG_MARGIN;
5839  }
5840 
5841  // Apply alignment.
5842  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
5843  *pOffset = VmaAlignUp(*pOffset, alignment);
5844 
5845  // Check previous suballocations for BufferImageGranularity conflicts.
5846  // Make bigger alignment if necessary.
5847  if(bufferImageGranularity > 1)
5848  {
5849  bool bufferImageGranularityConflict = false;
5850  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
5851  while(prevSuballocItem != m_Suballocations.cbegin())
5852  {
5853  --prevSuballocItem;
5854  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
5855  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
5856  {
5857  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
5858  {
5859  bufferImageGranularityConflict = true;
5860  break;
5861  }
5862  }
5863  else
5864  // Already on previous page.
5865  break;
5866  }
5867  if(bufferImageGranularityConflict)
5868  {
5869  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
5870  }
5871  }
5872 
5873  // Calculate padding at the beginning based on current offset.
5874  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
5875 
5876  // Calculate required margin at the end if this is not last suballocation.
5877  VmaSuballocationList::const_iterator next = suballocItem;
5878  ++next;
5879  const VkDeviceSize requiredEndMargin =
5880  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
5881 
5882  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
5883  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
5884  {
5885  return false;
5886  }
5887 
5888  // Check next suballocations for BufferImageGranularity conflicts.
5889  // If conflict exists, allocation cannot be made here.
5890  if(bufferImageGranularity > 1)
5891  {
5892  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
5893  ++nextSuballocItem;
5894  while(nextSuballocItem != m_Suballocations.cend())
5895  {
5896  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
5897  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
5898  {
5899  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
5900  {
5901  return false;
5902  }
5903  }
5904  else
5905  {
5906  // Already on next page.
5907  break;
5908  }
5909  ++nextSuballocItem;
5910  }
5911  }
5912  }
5913 
5914  // All tests passed: Success. pOffset is already filled.
5915  return true;
5916 }
5917 
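// A minimal sketch (not part of vk_mem_alloc.h) of the page test behind the
// bufferImageGranularity checks in CheckAllocation() above: two resources can
// conflict only if the last byte of the first and the first byte of the
// second land on the same granularity "page". OnSamePage is a hypothetical
// stand-in for VmaBlocksOnSamePage(); Vulkan guarantees that
// bufferImageGranularity (pageSize here) is a power of two.
#include <cstdint>

static bool OnSamePage(uint64_t aOffset, uint64_t aSize,
                       uint64_t bOffset, uint64_t pageSize)
{
    const uint64_t aLastBytePage  = (aOffset + aSize - 1) & ~(pageSize - 1);
    const uint64_t bFirstBytePage = bOffset & ~(pageSize - 1);
    return aLastBytePage == bFirstBytePage;
}

// Example with pageSize = 4096: a buffer ending at byte 4095 and an image
// starting at byte 4096 sit on different pages, so no conflict is possible;
// if the image started at byte 4000 instead, the suballocation types would
// have to be compared with VmaIsBufferImageGranularityConflict().
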
5918 void VmaBlockMetadata::MergeFreeWithNext(VmaSuballocationList::iterator item)
5919 {
5920  VMA_ASSERT(item != m_Suballocations.end());
5921  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
5922 
5923  VmaSuballocationList::iterator nextItem = item;
5924  ++nextItem;
5925  VMA_ASSERT(nextItem != m_Suballocations.end());
5926  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
5927 
5928  item->size += nextItem->size;
5929  --m_FreeCount;
5930  m_Suballocations.erase(nextItem);
5931 }
5932 
5933 VmaSuballocationList::iterator VmaBlockMetadata::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
5934 {
5935  // Change this suballocation to be marked as free.
5936  VmaSuballocation& suballoc = *suballocItem;
5937  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5938  suballoc.hAllocation = VK_NULL_HANDLE;
5939 
5940  // Update totals.
5941  ++m_FreeCount;
5942  m_SumFreeSize += suballoc.size;
5943 
5944  // Merge with previous and/or next suballocation if it's also free.
5945  bool mergeWithNext = false;
5946  bool mergeWithPrev = false;
5947 
5948  VmaSuballocationList::iterator nextItem = suballocItem;
5949  ++nextItem;
5950  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
5951  {
5952  mergeWithNext = true;
5953  }
5954 
5955  VmaSuballocationList::iterator prevItem = suballocItem;
5956  if(suballocItem != m_Suballocations.begin())
5957  {
5958  --prevItem;
5959  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
5960  {
5961  mergeWithPrev = true;
5962  }
5963  }
5964 
5965  if(mergeWithNext)
5966  {
5967  UnregisterFreeSuballocation(nextItem);
5968  MergeFreeWithNext(suballocItem);
5969  }
5970 
5971  if(mergeWithPrev)
5972  {
5973  UnregisterFreeSuballocation(prevItem);
5974  MergeFreeWithNext(prevItem);
5975  RegisterFreeSuballocation(prevItem);
5976  return prevItem;
5977  }
5978  else
5979  {
5980  RegisterFreeSuballocation(suballocItem);
5981  return suballocItem;
5982  }
5983 }
5984 
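// A minimal, self-contained sketch (not part of vk_mem_alloc.h) of the
// free-and-coalesce step implemented by FreeSuballocation() above, using a
// plain std::list of ranges. Range, RangeList and FreeAndMerge are
// hypothetical names; the real code additionally maintains
// m_FreeSuballocationsBySize around the merge.
#include <cstdint>
#include <iterator>
#include <list>

struct Range { uint64_t offset; uint64_t size; bool free; };
using RangeList = std::list<Range>;

// Marks *it free and merges it with free neighbors; returns the merged item.
static RangeList::iterator FreeAndMerge(RangeList& ranges, RangeList::iterator it)
{
    it->free = true;

    RangeList::iterator next = std::next(it);
    if(next != ranges.end() && next->free)
    {
        it->size += next->size; // Absorb the following free range.
        ranges.erase(next);
    }

    if(it != ranges.begin())
    {
        RangeList::iterator prev = std::prev(it);
        if(prev->free)
        {
            prev->size += it->size; // Fold this range into the previous one.
            ranges.erase(it);
            return prev;
        }
    }
    return it;
}
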
5985 void VmaBlockMetadata::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
5986 {
5987  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
5988  VMA_ASSERT(item->size > 0);
5989 
5990  // You may want to enable this validation at the beginning or at the end of
5991  // this function, depending on what you want to check.
5992  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5993 
5994  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5995  {
5996  if(m_FreeSuballocationsBySize.empty())
5997  {
5998  m_FreeSuballocationsBySize.push_back(item);
5999  }
6000  else
6001  {
6002  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
6003  }
6004  }
6005 
6006  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
6007 }
6008 
6009 
6010 void VmaBlockMetadata::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
6011 {
6012  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
6013  VMA_ASSERT(item->size > 0);
6014 
6015  // You may want to enable this validation at the beginning or at the end of
6016  // this function, depending on what you want to check.
6017  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
6018 
6019  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6020  {
6021  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
6022  m_FreeSuballocationsBySize.data(),
6023  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
6024  item,
6025  VmaSuballocationItemSizeLess());
6026  for(size_t index = it - m_FreeSuballocationsBySize.data();
6027  index < m_FreeSuballocationsBySize.size();
6028  ++index)
6029  {
6030  if(m_FreeSuballocationsBySize[index] == item)
6031  {
6032  VmaVectorRemove(m_FreeSuballocationsBySize, index);
6033  return;
6034  }
6035  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
6036  }
6037  VMA_ASSERT(0 && "Not found.");
6038  }
6039 
6040  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
6041 }
6042 
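// A minimal sketch (not part of vk_mem_alloc.h) of the lookup strategy in
// UnregisterFreeSuballocation() above: binary-search to the first entry of
// equal size, then scan linearly, because many registered suballocations can
// share the same size and only pointer identity picks the right one.
// RemoveBySizeThenIdentity and the std::vector/std::lower_bound variant are
// hypothetical; the real code uses VmaBinaryFindFirstNotLess.
#include <algorithm>
#include <cstdint>
#include <vector>

static bool RemoveBySizeThenIdentity(std::vector<const uint64_t*>& bySize,
                                     const uint64_t* item)
{
    // bySize is kept sorted ascending by the pointed-to size.
    auto it = std::lower_bound(bySize.begin(), bySize.end(), item,
        [](const uint64_t* a, const uint64_t* b) { return *a < *b; });
    for(; it != bySize.end() && **it == *item; ++it)
    {
        if(*it == item) // Equal size is not enough - compare identity.
        {
            bySize.erase(it);
            return true;
        }
    }
    return false; // Not registered; the real code asserts here.
}
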
6043 ////////////////////////////////////////////////////////////////////////////////
6044 // class VmaDeviceMemoryMapping
6045 
6046 VmaDeviceMemoryMapping::VmaDeviceMemoryMapping() :
6047  m_MapCount(0),
6048  m_pMappedData(VMA_NULL)
6049 {
6050 }
6051 
6052 VmaDeviceMemoryMapping::~VmaDeviceMemoryMapping()
6053 {
6054  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
6055 }
6056 
6057 VkResult VmaDeviceMemoryMapping::Map(VmaAllocator hAllocator, VkDeviceMemory hMemory, uint32_t count, void **ppData)
6058 {
6059  if(count == 0)
6060  {
6061  return VK_SUCCESS;
6062  }
6063 
6064  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
6065  if(m_MapCount != 0)
6066  {
6067  m_MapCount += count;
6068  VMA_ASSERT(m_pMappedData != VMA_NULL);
6069  if(ppData != VMA_NULL)
6070  {
6071  *ppData = m_pMappedData;
6072  }
6073  return VK_SUCCESS;
6074  }
6075  else
6076  {
6077  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
6078  hAllocator->m_hDevice,
6079  hMemory,
6080  0, // offset
6081  VK_WHOLE_SIZE,
6082  0, // flags
6083  &m_pMappedData);
6084  if(result == VK_SUCCESS)
6085  {
6086  if(ppData != VMA_NULL)
6087  {
6088  *ppData = m_pMappedData;
6089  }
6090  m_MapCount = count;
6091  }
6092  return result;
6093  }
6094 }
6095 
6096 void VmaDeviceMemoryMapping::Unmap(VmaAllocator hAllocator, VkDeviceMemory hMemory, uint32_t count)
6097 {
6098  if(count == 0)
6099  {
6100  return;
6101  }
6102 
6103  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
6104  if(m_MapCount >= count)
6105  {
6106  m_MapCount -= count;
6107  if(m_MapCount == 0)
6108  {
6109  m_pMappedData = VMA_NULL;
6110  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, hMemory);
6111  }
6112  }
6113  else
6114  {
6115  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
6116  }
6117 }
6118 
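// A minimal sketch (not part of vk_mem_alloc.h) of the reference-counting
// contract implemented by Map()/Unmap() above: only the first Map() performs
// the actual vkMapMemory(), later calls just bump the counter, and the memory
// is unmapped only when the counter returns to zero. MockMapping is
// hypothetical and stands in for VmaDeviceMemoryMapping without the mutex
// and the Vulkan calls.
#include <cassert>
#include <cstdint>

struct MockMapping
{
    uint32_t mapCount = 0;
    bool deviceMapped = false; // Stands in for the vkMapMemory state.

    void Map(uint32_t count)
    {
        if(count == 0) return;
        if(mapCount == 0) deviceMapped = true; // Real code calls vkMapMemory here.
        mapCount += count;
    }
    void Unmap(uint32_t count)
    {
        if(count == 0) return;
        assert(mapCount >= count && "Unbalanced Unmap.");
        mapCount -= count;
        if(mapCount == 0) deviceMapped = false; // Real code calls vkUnmapMemory here.
    }
};

// Consequence: an allocation created with VMA_ALLOCATION_CREATE_MAPPED_BIT
// holds one reference for its whole lifetime, so user Map()/Unmap() pairs
// can never unmap the block underneath it.
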
6119 ////////////////////////////////////////////////////////////////////////////////
6120 // class VmaDeviceMemoryBlock
6121 
6122 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
6123  m_MemoryTypeIndex(UINT32_MAX),
6124  m_hMemory(VK_NULL_HANDLE),
6125  m_Metadata(hAllocator)
6126 {
6127 }
6128 
6129 void VmaDeviceMemoryBlock::Init(
6130  uint32_t newMemoryTypeIndex,
6131  VkDeviceMemory newMemory,
6132  VkDeviceSize newSize)
6133 {
6134  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
6135 
6136  m_MemoryTypeIndex = newMemoryTypeIndex;
6137  m_hMemory = newMemory;
6138 
6139  m_Metadata.Init(newSize);
6140 }
6141 
6142 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
6143 {
6144  // This is the most important assert in the entire library.
6145  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
6146  VMA_ASSERT(m_Metadata.IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
6147 
6148  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
6149  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_Metadata.GetSize(), m_hMemory);
6150  m_hMemory = VK_NULL_HANDLE;
6151 }
6152 
6153 bool VmaDeviceMemoryBlock::Validate() const
6154 {
6155  if((m_hMemory == VK_NULL_HANDLE) ||
6156  (m_Metadata.GetSize() == 0))
6157  {
6158  return false;
6159  }
6160 
6161  return m_Metadata.Validate();
6162 }
6163 
6164 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
6165 {
6166  return m_Mapping.Map(hAllocator, m_hMemory, count, ppData);
6167 }
6168 
6169 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
6170 {
6171  m_Mapping.Unmap(hAllocator, m_hMemory, count);
6172 }
6173 
6174 static void InitStatInfo(VmaStatInfo& outInfo)
6175 {
6176  memset(&outInfo, 0, sizeof(outInfo));
6177  outInfo.allocationSizeMin = UINT64_MAX;
6178  outInfo.unusedRangeSizeMin = UINT64_MAX;
6179 }
6180 
6181 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
6182 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
6183 {
6184  inoutInfo.blockCount += srcInfo.blockCount;
6185  inoutInfo.allocationCount += srcInfo.allocationCount;
6186  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
6187  inoutInfo.usedBytes += srcInfo.usedBytes;
6188  inoutInfo.unusedBytes += srcInfo.unusedBytes;
6189  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
6190  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
6191  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
6192  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
6193 }
6194 
6195 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
6196 {
6197  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
6198  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
6199  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
6200  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
6201 }
6202 
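// A minimal sketch (not part of vk_mem_alloc.h) of the rounding division the
// averages above rely on: VmaRoundDiv() divides with rounding to nearest
// instead of truncating toward zero. RoundDiv below is a hypothetical
// restatement for uint64_t only.
#include <cstdint>

static uint64_t RoundDiv(uint64_t x, uint64_t y)
{
    return (x + y / 2) / y;
}

// Example: 3 allocations using 100 bytes total average to RoundDiv(100, 3)
// == 33, while 101 bytes total average to RoundDiv(101, 3) == 34.
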
6203 VmaPool_T::VmaPool_T(
6204  VmaAllocator hAllocator,
6205  const VmaPoolCreateInfo& createInfo) :
6206  m_BlockVector(
6207  hAllocator,
6208  createInfo.memoryTypeIndex,
6209  createInfo.blockSize,
6210  createInfo.minBlockCount,
6211  createInfo.maxBlockCount,
6212  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
6213  createInfo.frameInUseCount,
6214  true) // isCustomPool
6215 {
6216 }
6217 
6218 VmaPool_T::~VmaPool_T()
6219 {
6220 }
6221 
6222 #if VMA_STATS_STRING_ENABLED
6223 
6224 #endif // #if VMA_STATS_STRING_ENABLED
6225 
6226 VmaBlockVector::VmaBlockVector(
6227  VmaAllocator hAllocator,
6228  uint32_t memoryTypeIndex,
6229  VkDeviceSize preferredBlockSize,
6230  size_t minBlockCount,
6231  size_t maxBlockCount,
6232  VkDeviceSize bufferImageGranularity,
6233  uint32_t frameInUseCount,
6234  bool isCustomPool) :
6235  m_hAllocator(hAllocator),
6236  m_MemoryTypeIndex(memoryTypeIndex),
6237  m_PreferredBlockSize(preferredBlockSize),
6238  m_MinBlockCount(minBlockCount),
6239  m_MaxBlockCount(maxBlockCount),
6240  m_BufferImageGranularity(bufferImageGranularity),
6241  m_FrameInUseCount(frameInUseCount),
6242  m_IsCustomPool(isCustomPool),
6243  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
6244  m_HasEmptyBlock(false),
6245  m_pDefragmentator(VMA_NULL)
6246 {
6247 }
6248 
6249 VmaBlockVector::~VmaBlockVector()
6250 {
6251  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
6252 
6253  for(size_t i = m_Blocks.size(); i--; )
6254  {
6255  m_Blocks[i]->Destroy(m_hAllocator);
6256  vma_delete(m_hAllocator, m_Blocks[i]);
6257  }
6258 }
6259 
6260 VkResult VmaBlockVector::CreateMinBlocks()
6261 {
6262  for(size_t i = 0; i < m_MinBlockCount; ++i)
6263  {
6264  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
6265  if(res != VK_SUCCESS)
6266  {
6267  return res;
6268  }
6269  }
6270  return VK_SUCCESS;
6271 }
6272 
6273 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
6274 {
6275  pStats->size = 0;
6276  pStats->unusedSize = 0;
6277  pStats->allocationCount = 0;
6278  pStats->unusedRangeCount = 0;
6279  pStats->unusedRangeSizeMax = 0;
6280 
6281  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6282 
6283  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6284  {
6285  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
6286  VMA_ASSERT(pBlock);
6287  VMA_HEAVY_ASSERT(pBlock->Validate());
6288  pBlock->m_Metadata.AddPoolStats(*pStats);
6289  }
6290 }
6291 
6292 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
6293 
6294 VkResult VmaBlockVector::Allocate(
6295  VmaPool hCurrentPool,
6296  uint32_t currentFrameIndex,
6297  const VkMemoryRequirements& vkMemReq,
6298  const VmaAllocationCreateInfo& createInfo,
6299  VmaSuballocationType suballocType,
6300  VmaAllocation* pAllocation)
6301 {
6302  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
6303  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
6304 
6305  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6306 
6307  // 1. Search existing allocations. Try to allocate without making other allocations lost.
6308  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
6309  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
6310  {
6311  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
6312  VMA_ASSERT(pCurrBlock);
6313  VmaAllocationRequest currRequest = {};
6314  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
6315  currentFrameIndex,
6316  m_FrameInUseCount,
6317  m_BufferImageGranularity,
6318  vkMemReq.size,
6319  vkMemReq.alignment,
6320  suballocType,
6321  false, // canMakeOtherLost
6322  &currRequest))
6323  {
6324  // Allocate from pCurrBlock.
6325  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
6326 
6327  if(mapped)
6328  {
6329  VkResult res = pCurrBlock->Map(m_hAllocator, 1, VMA_NULL);
6330  if(res != VK_SUCCESS)
6331  {
6332  return res;
6333  }
6334  }
6335 
6336  // We no longer have an empty block.
6337  if(pCurrBlock->m_Metadata.IsEmpty())
6338  {
6339  m_HasEmptyBlock = false;
6340  }
6341 
6342  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
6343  pCurrBlock->m_Metadata.Alloc(currRequest, suballocType, vkMemReq.size, *pAllocation);
6344  (*pAllocation)->InitBlockAllocation(
6345  hCurrentPool,
6346  pCurrBlock,
6347  currRequest.offset,
6348  vkMemReq.alignment,
6349  vkMemReq.size,
6350  suballocType,
6351  mapped,
6352  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
6353  VMA_HEAVY_ASSERT(pCurrBlock->Validate());
6354  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
6355  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
6356  return VK_SUCCESS;
6357  }
6358  }
6359 
6360  const bool canCreateNewBlock =
6361  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
6362  (m_Blocks.size() < m_MaxBlockCount);
6363 
6364  // 2. Try to create new block.
6365  if(canCreateNewBlock)
6366  {
6367  // Calculate optimal size for new block.
6368  VkDeviceSize newBlockSize = m_PreferredBlockSize;
6369  uint32_t newBlockSizeShift = 0;
6370  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
6371 
6372  // Allocating blocks of other sizes is allowed only in default pools.
6373  // In custom pools block size is fixed.
6374  if(m_IsCustomPool == false)
6375  {
6376  // Allocate 1/8, 1/4, 1/2 as first blocks.
6377  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
6378  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
6379  {
6380  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
6381  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= vkMemReq.size * 2)
6382  {
6383  newBlockSize = smallerNewBlockSize;
6384  ++newBlockSizeShift;
6385  }
6386  else
6387  {
6388  break;
6389  }
6390  }
6391  }
6392 
6393  size_t newBlockIndex = 0;
6394  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
6395  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
6396  if(m_IsCustomPool == false)
6397  {
6398  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
6399  {
6400  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
6401  if(smallerNewBlockSize >= vkMemReq.size)
6402  {
6403  newBlockSize = smallerNewBlockSize;
6404  ++newBlockSizeShift;
6405  res = CreateBlock(newBlockSize, &newBlockIndex);
6406  }
6407  else
6408  {
6409  break;
6410  }
6411  }
6412  }
6413 
6414  if(res == VK_SUCCESS)
6415  {
6416  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
6417  VMA_ASSERT(pBlock->m_Metadata.GetSize() >= vkMemReq.size);
6418 
6419  if(mapped)
6420  {
6421  res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
6422  if(res != VK_SUCCESS)
6423  {
6424  return res;
6425  }
6426  }
6427 
6428  // Allocate from pBlock. Because it is empty, allocRequest can be trivially filled.
6429  VmaAllocationRequest allocRequest;
6430  pBlock->m_Metadata.CreateFirstAllocationRequest(&allocRequest);
6431  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
6432  pBlock->m_Metadata.Alloc(allocRequest, suballocType, vkMemReq.size, *pAllocation);
6433  (*pAllocation)->InitBlockAllocation(
6434  hCurrentPool,
6435  pBlock,
6436  allocRequest.offset,
6437  vkMemReq.alignment,
6438  vkMemReq.size,
6439  suballocType,
6440  mapped,
6441  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
6442  VMA_HEAVY_ASSERT(pBlock->Validate());
6443  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
6444  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
6445  return VK_SUCCESS;
6446  }
6447  }
6448 
6449  const bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
6450 
6451  // 3. Try to allocate from existing blocks with making other allocations lost.
6452  if(canMakeOtherLost)
6453  {
6454  uint32_t tryIndex = 0;
6455  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
6456  {
6457  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
6458  VmaAllocationRequest bestRequest = {};
6459  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
6460 
6461  // 1. Search existing allocations.
6462  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
6463  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
6464  {
6465  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
6466  VMA_ASSERT(pCurrBlock);
6467  VmaAllocationRequest currRequest = {};
6468  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
6469  currentFrameIndex,
6470  m_FrameInUseCount,
6471  m_BufferImageGranularity,
6472  vkMemReq.size,
6473  vkMemReq.alignment,
6474  suballocType,
6475  canMakeOtherLost,
6476  &currRequest))
6477  {
6478  const VkDeviceSize currRequestCost = currRequest.CalcCost();
6479  if(pBestRequestBlock == VMA_NULL ||
6480  currRequestCost < bestRequestCost)
6481  {
6482  pBestRequestBlock = pCurrBlock;
6483  bestRequest = currRequest;
6484  bestRequestCost = currRequestCost;
6485 
6486  if(bestRequestCost == 0)
6487  {
6488  break;
6489  }
6490  }
6491  }
6492  }
6493 
6494  if(pBestRequestBlock != VMA_NULL)
6495  {
6496  if(mapped)
6497  {
6498  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
6499  if(res != VK_SUCCESS)
6500  {
6501  return res;
6502  }
6503  }
6504 
6505  if(pBestRequestBlock->m_Metadata.MakeRequestedAllocationsLost(
6506  currentFrameIndex,
6507  m_FrameInUseCount,
6508  &bestRequest))
6509  {
6510  // We no longer have an empty block.
6511  if(pBestRequestBlock->m_Metadata.IsEmpty())
6512  {
6513  m_HasEmptyBlock = false;
6514  }
6515  // Allocate from this pBlock.
6516  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
6517  pBestRequestBlock->m_Metadata.Alloc(bestRequest, suballocType, vkMemReq.size, *pAllocation);
6518  (*pAllocation)->InitBlockAllocation(
6519  hCurrentPool,
6520  pBestRequestBlock,
6521  bestRequest.offset,
6522  vkMemReq.alignment,
6523  vkMemReq.size,
6524  suballocType,
6525  mapped,
6526  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
6527  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
6528  VMA_DEBUG_LOG(" Returned from existing block");
6529  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
6530  return VK_SUCCESS;
6531  }
6532  // else: Some allocations must have been touched while we are here. Next try.
6533  }
6534  else
6535  {
6536  // Could not find place in any of the blocks - break outer loop.
6537  break;
6538  }
6539  }
6540  /* Maximum number of tries exceeded - a very unlikely event when many other
6541  threads are simultaneously touching allocations, making it impossible to
6542  mark them lost at the same time as we try to allocate. */
6543  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
6544  {
6545  return VK_ERROR_TOO_MANY_OBJECTS;
6546  }
6547  }
6548 
6549  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6550 }
6551 
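// A minimal sketch (not part of vk_mem_alloc.h) of the block-size heuristic
// in step 2 of Allocate() above: while a default pool is still small, start
// with a fraction of the preferred block size, but never drop below twice
// the requested size. ChooseNewBlockSize is a hypothetical name.
#include <cstdint>

static uint64_t ChooseNewBlockSize(uint64_t preferredBlockSize,
                                   uint64_t maxExistingBlockSize,
                                   uint64_t requestSize)
{
    uint64_t newBlockSize = preferredBlockSize;
    for(uint32_t i = 0; i < 3; ++i) // Try 1/2, 1/4, 1/8 of the preferred size.
    {
        const uint64_t smaller = newBlockSize / 2;
        if(smaller > maxExistingBlockSize && smaller >= requestSize * 2)
        {
            newBlockSize = smaller;
        }
        else
        {
            break;
        }
    }
    return newBlockSize;
}

// Example: with a 256 MiB preferred size, an empty pool and a 1 MiB request,
// the first block allocated is 32 MiB (256 -> 128 -> 64 -> 32); if that
// vkAllocateMemory fails, the code above additionally retries with halved
// sizes as long as they still fit the request.
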
6552 void VmaBlockVector::Free(
6553  VmaAllocation hAllocation)
6554 {
6555  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
6556 
6557  // Scope for lock.
6558  {
6559  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6560 
6561  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
6562 
6563  if(hAllocation->IsPersistentMap())
6564  {
6565  pBlock->m_Mapping.Unmap(m_hAllocator, pBlock->m_hMemory, 1);
6566  }
6567 
6568  pBlock->m_Metadata.Free(hAllocation);
6569  VMA_HEAVY_ASSERT(pBlock->Validate());
6570 
6571  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
6572 
6573  // pBlock became empty after this deallocation.
6574  if(pBlock->m_Metadata.IsEmpty())
6575  {
6576  // We already have an empty block - we don't want two, so delete this one.
6577  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
6578  {
6579  pBlockToDelete = pBlock;
6580  Remove(pBlock);
6581  }
6582  // We now have our first empty block.
6583  else
6584  {
6585  m_HasEmptyBlock = true;
6586  }
6587  }
6588  // pBlock didn't become empty, but we have another empty block - find and free that one.
6589  // (This is optional, heuristics.)
6590  else if(m_HasEmptyBlock)
6591  {
6592  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
6593  if(pLastBlock->m_Metadata.IsEmpty() && m_Blocks.size() > m_MinBlockCount)
6594  {
6595  pBlockToDelete = pLastBlock;
6596  m_Blocks.pop_back();
6597  m_HasEmptyBlock = false;
6598  }
6599  }
6600 
6601  IncrementallySortBlocks();
6602  }
6603 
6604  // Destruction of a free block. Deferred until this point, outside of the mutex
6605  // lock, for performance reasons.
6606  if(pBlockToDelete != VMA_NULL)
6607  {
6608  VMA_DEBUG_LOG(" Deleted empty block");
6609  pBlockToDelete->Destroy(m_hAllocator);
6610  vma_delete(m_hAllocator, pBlockToDelete);
6611  }
6612 }
6613 
6614 size_t VmaBlockVector::CalcMaxBlockSize() const
6615 {
6616  size_t result = 0;
6617  for(size_t i = m_Blocks.size(); i--; )
6618  {
6619  result = VMA_MAX((uint64_t)result, (uint64_t)m_Blocks[i]->m_Metadata.GetSize());
6620  if(result >= m_PreferredBlockSize)
6621  {
6622  break;
6623  }
6624  }
6625  return result;
6626 }
6627 
6628 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
6629 {
6630  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6631  {
6632  if(m_Blocks[blockIndex] == pBlock)
6633  {
6634  VmaVectorRemove(m_Blocks, blockIndex);
6635  return;
6636  }
6637  }
6638  VMA_ASSERT(0);
6639 }
6640 
6641 void VmaBlockVector::IncrementallySortBlocks()
6642 {
6643  // Bubble sort only until first swap.
6644  for(size_t i = 1; i < m_Blocks.size(); ++i)
6645  {
6646  if(m_Blocks[i - 1]->m_Metadata.GetSumFreeSize() > m_Blocks[i]->m_Metadata.GetSumFreeSize())
6647  {
6648  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
6649  return;
6650  }
6651  }
6652 }
6653 
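// A minimal sketch (not part of vk_mem_alloc.h) of the incremental sorting
// used above: performing at most one bubble-sort swap per call keeps m_Blocks
// nearly sorted over many Free() calls at O(n) per call, instead of paying
// for a full sort on every deallocation. IncrementallySort is hypothetical.
#include <cstddef>
#include <utility>
#include <vector>

// Swaps the first out-of-order pair of neighbors (ascending) and returns.
template<typename T>
static void IncrementallySort(std::vector<T>& v)
{
    for(size_t i = 1; i < v.size(); ++i)
    {
        if(v[i] < v[i - 1])
        {
            std::swap(v[i - 1], v[i]);
            return; // At most one swap per call, as in the code above.
        }
    }
}
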
6654 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
6655 {
6656  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
6657  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
6658  allocInfo.allocationSize = blockSize;
6659  VkDeviceMemory mem = VK_NULL_HANDLE;
6660  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
6661  if(res < 0)
6662  {
6663  return res;
6664  }
6665 
6666  // New VkDeviceMemory successfully created.
6667 
6668  // Create a new block object for it.
6669  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
6670  pBlock->Init(
6671  m_MemoryTypeIndex,
6672  mem,
6673  allocInfo.allocationSize);
6674 
6675  m_Blocks.push_back(pBlock);
6676  if(pNewBlockIndex != VMA_NULL)
6677  {
6678  *pNewBlockIndex = m_Blocks.size() - 1;
6679  }
6680 
6681  return VK_SUCCESS;
6682 }
6683 
6684 #if VMA_STATS_STRING_ENABLED
6685 
6686 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
6687 {
6688  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6689 
6690  json.BeginObject();
6691 
6692  if(m_IsCustomPool)
6693  {
6694  json.WriteString("MemoryTypeIndex");
6695  json.WriteNumber(m_MemoryTypeIndex);
6696 
6697  json.WriteString("BlockSize");
6698  json.WriteNumber(m_PreferredBlockSize);
6699 
6700  json.WriteString("BlockCount");
6701  json.BeginObject(true);
6702  if(m_MinBlockCount > 0)
6703  {
6704  json.WriteString("Min");
6705  json.WriteNumber((uint64_t)m_MinBlockCount);
6706  }
6707  if(m_MaxBlockCount < SIZE_MAX)
6708  {
6709  json.WriteString("Max");
6710  json.WriteNumber((uint64_t)m_MaxBlockCount);
6711  }
6712  json.WriteString("Cur");
6713  json.WriteNumber((uint64_t)m_Blocks.size());
6714  json.EndObject();
6715 
6716  if(m_FrameInUseCount > 0)
6717  {
6718  json.WriteString("FrameInUseCount");
6719  json.WriteNumber(m_FrameInUseCount);
6720  }
6721  }
6722  else
6723  {
6724  json.WriteString("PreferredBlockSize");
6725  json.WriteNumber(m_PreferredBlockSize);
6726  }
6727 
6728  json.WriteString("Blocks");
6729  json.BeginArray();
6730  for(size_t i = 0; i < m_Blocks.size(); ++i)
6731  {
6732  m_Blocks[i]->m_Metadata.PrintDetailedMap(json);
6733  }
6734  json.EndArray();
6735 
6736  json.EndObject();
6737 }
6738 
6739 #endif // #if VMA_STATS_STRING_ENABLED
6740 
6741 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
6742  VmaAllocator hAllocator,
6743  uint32_t currentFrameIndex)
6744 {
6745  if(m_pDefragmentator == VMA_NULL)
6746  {
6747  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
6748  hAllocator,
6749  this,
6750  currentFrameIndex);
6751  }
6752 
6753  return m_pDefragmentator;
6754 }
6755 
6756 VkResult VmaBlockVector::Defragment(
6757  VmaDefragmentationStats* pDefragmentationStats,
6758  VkDeviceSize& maxBytesToMove,
6759  uint32_t& maxAllocationsToMove)
6760 {
6761  if(m_pDefragmentator == VMA_NULL)
6762  {
6763  return VK_SUCCESS;
6764  }
6765 
6766  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6767 
6768  // Defragment.
6769  VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);
6770 
6771  // Accumulate statistics.
6772  if(pDefragmentationStats != VMA_NULL)
6773  {
6774  const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
6775  const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
6776  pDefragmentationStats->bytesMoved += bytesMoved;
6777  pDefragmentationStats->allocationsMoved += allocationsMoved;
6778  VMA_ASSERT(bytesMoved <= maxBytesToMove);
6779  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
6780  maxBytesToMove -= bytesMoved;
6781  maxAllocationsToMove -= allocationsMoved;
6782  }
6783 
6784  // Free empty blocks.
6785  m_HasEmptyBlock = false;
6786  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
6787  {
6788  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
6789  if(pBlock->m_Metadata.IsEmpty())
6790  {
6791  if(m_Blocks.size() > m_MinBlockCount)
6792  {
6793  if(pDefragmentationStats != VMA_NULL)
6794  {
6795  ++pDefragmentationStats->deviceMemoryBlocksFreed;
6796  pDefragmentationStats->bytesFreed += pBlock->m_Metadata.GetSize();
6797  }
6798 
6799  VmaVectorRemove(m_Blocks, blockIndex);
6800  pBlock->Destroy(m_hAllocator);
6801  vma_delete(m_hAllocator, pBlock);
6802  }
6803  else
6804  {
6805  m_HasEmptyBlock = true;
6806  }
6807  }
6808  }
6809 
6810  return result;
6811 }
6812 
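// A minimal sketch (not part of vk_mem_alloc.h) of the reverse-iteration
// erase pattern used by the "free empty blocks" loop above: walking the
// vector backwards makes it safe to remove elements by index without
// adjusting the loop variable. EraseIf is a hypothetical helper.
#include <cstddef>
#include <vector>

template<typename T, typename Pred>
static void EraseIf(std::vector<T>& v, Pred pred)
{
    for(size_t i = v.size(); i--; )
    {
        if(pred(v[i]))
        {
            v.erase(v.begin() + static_cast<std::ptrdiff_t>(i));
        }
    }
}
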
6813 void VmaBlockVector::DestroyDefragmentator()
6814 {
6815  if(m_pDefragmentator != VMA_NULL)
6816  {
6817  vma_delete(m_hAllocator, m_pDefragmentator);
6818  m_pDefragmentator = VMA_NULL;
6819  }
6820 }
6821 
6822 void VmaBlockVector::MakePoolAllocationsLost(
6823  uint32_t currentFrameIndex,
6824  size_t* pLostAllocationCount)
6825 {
6826  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6827  size_t lostAllocationCount = 0;
6828  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6829  {
6830  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
6831  VMA_ASSERT(pBlock);
6832  lostAllocationCount += pBlock->m_Metadata.MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
6833  }
6834  if(pLostAllocationCount != VMA_NULL)
6835  {
6836  *pLostAllocationCount = lostAllocationCount;
6837  }
6838 }
6839 
6840 void VmaBlockVector::AddStats(VmaStats* pStats)
6841 {
6842  const uint32_t memTypeIndex = m_MemoryTypeIndex;
6843  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
6844 
6845  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6846 
6847  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6848  {
6849  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
6850  VMA_ASSERT(pBlock);
6851  VMA_HEAVY_ASSERT(pBlock->Validate());
6852  VmaStatInfo allocationStatInfo;
6853  pBlock->m_Metadata.CalcAllocationStatInfo(allocationStatInfo);
6854  VmaAddStatInfo(pStats->total, allocationStatInfo);
6855  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
6856  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
6857  }
6858 }
6859 
6860 ////////////////////////////////////////////////////////////////////////////////
6861 // VmaDefragmentator members definition
6862 
6863 VmaDefragmentator::VmaDefragmentator(
6864  VmaAllocator hAllocator,
6865  VmaBlockVector* pBlockVector,
6866  uint32_t currentFrameIndex) :
6867  m_hAllocator(hAllocator),
6868  m_pBlockVector(pBlockVector),
6869  m_CurrentFrameIndex(currentFrameIndex),
6870  m_BytesMoved(0),
6871  m_AllocationsMoved(0),
6872  m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
6873  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
6874 {
6875 }
6876 
6877 VmaDefragmentator::~VmaDefragmentator()
6878 {
6879  for(size_t i = m_Blocks.size(); i--; )
6880  {
6881  vma_delete(m_hAllocator, m_Blocks[i]);
6882  }
6883 }
6884 
6885 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
6886 {
6887  AllocationInfo allocInfo;
6888  allocInfo.m_hAllocation = hAlloc;
6889  allocInfo.m_pChanged = pChanged;
6890  m_Allocations.push_back(allocInfo);
6891 }
6892 
6893 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
6894 {
6895  // It has already been mapped for defragmentation.
6896  if(m_pMappedDataForDefragmentation)
6897  {
6898  *ppMappedData = m_pMappedDataForDefragmentation;
6899  return VK_SUCCESS;
6900  }
6901 
6902  // The block is already mapped persistently.
6903  if(m_pBlock->m_Mapping.GetMappedData())
6904  {
6905  *ppMappedData = m_pBlock->m_Mapping.GetMappedData();
6906  return VK_SUCCESS;
6907  }
6908 
6909  // Map on first usage.
6910  VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
6911  *ppMappedData = m_pMappedDataForDefragmentation;
6912  return res;
6913 }
6914 
6915 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
6916 {
6917  if(m_pMappedDataForDefragmentation != VMA_NULL)
6918  {
6919  m_pBlock->Unmap(hAllocator, 1);
6920  }
6921 }
6922 
6923 VkResult VmaDefragmentator::DefragmentRound(
6924  VkDeviceSize maxBytesToMove,
6925  uint32_t maxAllocationsToMove)
6926 {
6927  if(m_Blocks.empty())
6928  {
6929  return VK_SUCCESS;
6930  }
6931 
6932  size_t srcBlockIndex = m_Blocks.size() - 1;
6933  size_t srcAllocIndex = SIZE_MAX;
6934  for(;;)
6935  {
6936  // 1. Find next allocation to move.
6937  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
6938  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
6939  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
6940  {
6941  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
6942  {
6943  // Finished: no more allocations to process.
6944  if(srcBlockIndex == 0)
6945  {
6946  return VK_SUCCESS;
6947  }
6948  else
6949  {
6950  --srcBlockIndex;
6951  srcAllocIndex = SIZE_MAX;
6952  }
6953  }
6954  else
6955  {
6956  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
6957  }
6958  }
6959 
6960  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
6961  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
6962 
6963  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
6964  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
6965  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
6966  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
6967 
6968  // 2. Try to find new place for this allocation in preceding or current block.
6969  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
6970  {
6971  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
6972  VmaAllocationRequest dstAllocRequest;
6973  if(pDstBlockInfo->m_pBlock->m_Metadata.CreateAllocationRequest(
6974  m_CurrentFrameIndex,
6975  m_pBlockVector->GetFrameInUseCount(),
6976  m_pBlockVector->GetBufferImageGranularity(),
6977  size,
6978  alignment,
6979  suballocType,
6980  false, // canMakeOtherLost
6981  &dstAllocRequest) &&
6982  MoveMakesSense(
6983  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
6984  {
6985  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
6986 
6987  // Reached limit on number of allocations or bytes to move.
6988  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
6989  (m_BytesMoved + size > maxBytesToMove))
6990  {
6991  return VK_INCOMPLETE;
6992  }
6993 
6994  void* pDstMappedData = VMA_NULL;
6995  VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
6996  if(res != VK_SUCCESS)
6997  {
6998  return res;
6999  }
7000 
7001  void* pSrcMappedData = VMA_NULL;
7002  res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
7003  if(res != VK_SUCCESS)
7004  {
7005  return res;
7006  }
7007 
7008  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
7009  memcpy(
7010  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
7011  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
7012  static_cast<size_t>(size));
7013 
7014  pDstBlockInfo->m_pBlock->m_Metadata.Alloc(dstAllocRequest, suballocType, size, allocInfo.m_hAllocation);
7015  pSrcBlockInfo->m_pBlock->m_Metadata.FreeAtOffset(srcOffset);
7016 
7017  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
7018 
7019  if(allocInfo.m_pChanged != VMA_NULL)
7020  {
7021  *allocInfo.m_pChanged = VK_TRUE;
7022  }
7023 
7024  ++m_AllocationsMoved;
7025  m_BytesMoved += size;
7026 
7027  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
7028 
7029  break;
7030  }
7031  }
7032 
7033  // If not processed, this allocInfo remains in pSrcBlockInfo->m_Allocations for the next round.
7034 
7035  if(srcAllocIndex > 0)
7036  {
7037  --srcAllocIndex;
7038  }
7039  else
7040  {
7041  if(srcBlockIndex > 0)
7042  {
7043  --srcBlockIndex;
7044  srcAllocIndex = SIZE_MAX;
7045  }
7046  else
7047  {
7048  return VK_SUCCESS;
7049  }
7050  }
7051  }
7052 }
7053 
7054 VkResult VmaDefragmentator::Defragment(
7055  VkDeviceSize maxBytesToMove,
7056  uint32_t maxAllocationsToMove)
7057 {
7058  if(m_Allocations.empty())
7059  {
7060  return VK_SUCCESS;
7061  }
7062 
7063  // Create block info for each block.
7064  const size_t blockCount = m_pBlockVector->m_Blocks.size();
7065  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
7066  {
7067  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
7068  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
7069  m_Blocks.push_back(pBlockInfo);
7070  }
7071 
7072  // Sort them by m_pBlock pointer value.
7073  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
7074 
7075  // Move allocation infos from m_Allocations to the appropriate m_Blocks[].m_Allocations.
7076  for(size_t allocIndex = 0, allocCount = m_Allocations.size(); allocIndex < allocCount; ++allocIndex)
7077  {
7078  AllocationInfo& allocInfo = m_Allocations[allocIndex];
7079  // Now that we are inside VmaBlockVector::m_Mutex, we can make a final check whether this allocation was not lost.
7080  if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
7081  {
7082  VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
7083  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
7084  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
7085  {
7086  (*it)->m_Allocations.push_back(allocInfo);
7087  }
7088  else
7089  {
7090  VMA_ASSERT(0);
7091  }
7092  }
7093  }
7094  m_Allocations.clear();
7095 
7096  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
7097  {
7098  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
7099  pBlockInfo->CalcHasNonMovableAllocations();
7100  pBlockInfo->SortAllocationsBySizeDescecnding();
7101  }
7102 
7103  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
7104  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
7105 
7106  // Execute defragmentation rounds (the main part).
7107  VkResult result = VK_SUCCESS;
7108  for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
7109  {
7110  result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
7111  }
7112 
7113  // Unmap blocks that were mapped for defragmentation.
7114  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
7115  {
7116  m_Blocks[blockIndex]->Unmap(m_hAllocator);
7117  }
7118 
7119  return result;
7120 }
7121 
7122 bool VmaDefragmentator::MoveMakesSense(
7123  size_t dstBlockIndex, VkDeviceSize dstOffset,
7124  size_t srcBlockIndex, VkDeviceSize srcOffset)
7125 {
7126  if(dstBlockIndex < srcBlockIndex)
7127  {
7128  return true;
7129  }
7130  if(dstBlockIndex > srcBlockIndex)
7131  {
7132  return false;
7133  }
7134  if(dstOffset < srcOffset)
7135  {
7136  return true;
7137  }
7138  return false;
7139 }
7140 
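// A minimal restatement (not part of vk_mem_alloc.h) of MoveMakesSense()
// above: it is a lexicographic "strictly earlier" comparison on the pair
// (blockIndex, offset), so a move is worthwhile only if it packs the
// allocation toward the front. MoveMakesSenseLex is a hypothetical name.
#include <cstddef>
#include <cstdint>
#include <tuple>

static bool MoveMakesSenseLex(size_t dstBlockIndex, uint64_t dstOffset,
                              size_t srcBlockIndex, uint64_t srcOffset)
{
    return std::make_tuple(dstBlockIndex, dstOffset) <
           std::make_tuple(srcBlockIndex, srcOffset);
}

// Example: moving from (block 2, offset 0) to (block 1, offset 4096) makes
// sense; moving within block 1 from offset 0 to offset 4096 does not.
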
7141 ////////////////////////////////////////////////////////////////////////////////
7142 // VmaAllocator_T
7143 
7144 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
7145  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
7146  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
7147  m_hDevice(pCreateInfo->device),
7148  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
7149  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
7150  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
7151  m_PreferredLargeHeapBlockSize(0),
7152  m_PhysicalDevice(pCreateInfo->physicalDevice),
7153  m_CurrentFrameIndex(0),
7154  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks()))
7155 {
7156  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
7157 
7158  memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
7159  memset(&m_MemProps, 0, sizeof(m_MemProps));
7160  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
7161 
7162  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
7163  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
7164 
7165  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
7166  {
7167  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
7168  }
7169 
7170  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
7171  {
7172  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
7173  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
7174  }
7175 
7176  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
7177 
7178  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
7179  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
7180 
7181  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
7182  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
7183 
7184  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
7185  {
7186  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
7187  {
7188  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
7189  if(limit != VK_WHOLE_SIZE)
7190  {
7191  m_HeapSizeLimit[heapIndex] = limit;
7192  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
7193  {
7194  m_MemProps.memoryHeaps[heapIndex].size = limit;
7195  }
7196  }
7197  }
7198  }
7199 
7200  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7201  {
7202  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
7203 
7204  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
7205  this,
7206  memTypeIndex,
7207  preferredBlockSize,
7208  0,
7209  SIZE_MAX,
7210  GetBufferImageGranularity(),
7211  pCreateInfo->frameInUseCount,
7212  false); // isCustomPool
7213  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
7214  // because minBlockCount is 0.
7215  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
7216  }
7217 }
7218 
7219 VmaAllocator_T::~VmaAllocator_T()
7220 {
7221  VMA_ASSERT(m_Pools.empty());
7222 
7223  for(size_t i = GetMemoryTypeCount(); i--; )
7224  {
7225  vma_delete(this, m_pDedicatedAllocations[i]);
7226  vma_delete(this, m_pBlockVectors[i]);
7227  }
7228 }
7229 
7230 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
7231 {
7232 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
7233  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
7234  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
7235  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
7236  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
7237  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
7238  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
7239  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
7240  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
7241  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
7242  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
7243  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
7244  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
7245  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
7246  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
7247  if(m_UseKhrDedicatedAllocation)
7248  {
7249  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
7250  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
7251  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
7252  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
7253  }
7254 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
7255 
7256 #define VMA_COPY_IF_NOT_NULL(funcName) \
7257  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
7258 
7259  if(pVulkanFunctions != VMA_NULL)
7260  {
7261  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
7262  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
7263  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
7264  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
7265  VMA_COPY_IF_NOT_NULL(vkMapMemory);
7266  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
7267  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
7268  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
7269  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
7270  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
7271  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
7272  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
7273  VMA_COPY_IF_NOT_NULL(vkCreateImage);
7274  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
7275  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
7276  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
7277  }
7278 
7279 #undef VMA_COPY_IF_NOT_NULL
7280 
7281  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
7282  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
7283  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
7284  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
7285  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
7286  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
7287  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
7288  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
7289  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
7290  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
7291  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
7292  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
7293  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
7294  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
7295  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
7296  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
7297  if(m_UseKhrDedicatedAllocation)
7298  {
7299  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
7300  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
7301  }
7302 }
7303 
7304 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
7305 {
7306  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
7307  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
7308  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
7309  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
7310 }
7311 
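// A worked example of the heuristic above, assuming the library defaults of
// VMA_SMALL_HEAP_MAX_SIZE = 512 MiB and a preferred large-heap block size of
// 256 MiB (both configurable):
//
//   heapSize = 256 MiB -> "small" heap -> block size = 256 MiB / 8 = 32 MiB
//   heapSize =   8 GiB -> "large" heap -> block size = 256 MiB
//
// Dividing small heaps by 8 keeps a single block from consuming a large
// fraction of e.g. a 256 MiB DEVICE_LOCAL heap on an integrated GPU.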
7312 VkResult VmaAllocator_T::AllocateMemoryOfType(
7313  const VkMemoryRequirements& vkMemReq,
7314  bool dedicatedAllocation,
7315  VkBuffer dedicatedBuffer,
7316  VkImage dedicatedImage,
7317  const VmaAllocationCreateInfo& createInfo,
7318  uint32_t memTypeIndex,
7319  VmaSuballocationType suballocType,
7320  VmaAllocation* pAllocation)
7321 {
7322  VMA_ASSERT(pAllocation != VMA_NULL);
7323  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
7324 
7325  VmaAllocationCreateInfo finalCreateInfo = createInfo;
7326 
7327  // If memory type is not HOST_VISIBLE, disable MAPPED.
7328  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
7329  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
7330  {
7331  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
7332  }
7333 
7334  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
7335  VMA_ASSERT(blockVector);
7336 
7337  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
7338  bool preferDedicatedMemory =
7339  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
7340  dedicatedAllocation ||
7341  // Heuristic: Allocate dedicated memory if requested size is greater than half of preferred block size.
7342  vkMemReq.size > preferredBlockSize / 2;
7343 
7344  if(preferDedicatedMemory &&
7345  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
7346  finalCreateInfo.pool == VK_NULL_HANDLE)
7347  {
7348  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
7349  }
7350 
7351  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
7352  {
7353  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7354  {
7355  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7356  }
7357  else
7358  {
7359  return AllocateDedicatedMemory(
7360  vkMemReq.size,
7361  suballocType,
7362  memTypeIndex,
7363  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
7364  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
7365  finalCreateInfo.pUserData,
7366  dedicatedBuffer,
7367  dedicatedImage,
7368  pAllocation);
7369  }
7370  }
7371  else
7372  {
7373  VkResult res = blockVector->Allocate(
7374  VK_NULL_HANDLE, // hCurrentPool
7375  m_CurrentFrameIndex.load(),
7376  vkMemReq,
7377  finalCreateInfo,
7378  suballocType,
7379  pAllocation);
7380  if(res == VK_SUCCESS)
7381  {
7382  return res;
7383  }
7384 
7385  // Block allocation failed. Try dedicated memory.
7386  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7387  {
7388  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7389  }
7390  else
7391  {
7392  res = AllocateDedicatedMemory(
7393  vkMemReq.size,
7394  suballocType,
7395  memTypeIndex,
7396  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
7397  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
7398  finalCreateInfo.pUserData,
7399  dedicatedBuffer,
7400  dedicatedImage,
7401  pAllocation);
7402  if(res == VK_SUCCESS)
7403  {
7404  // Succeeded: AllocateDedicatedMemory function already filled pAllocation, nothing more to do here.
7405  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
7406  return VK_SUCCESS;
7407  }
7408  else
7409  {
7410  // Everything failed: Return error code.
7411  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
7412  return res;
7413  }
7414  }
7415  }
7416 }
7417 
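// A numeric sketch of the dedicated-memory heuristic above, assuming the
// default 256 MiB preferred block size: a 200 MiB request exceeds
// preferredBlockSize / 2 = 128 MiB, so it receives its own VkDeviceMemory
// (unless VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT is set or a custom pool is
// used), while a 64 MiB request is suballocated from a shared block first and
// falls back to dedicated memory only if that fails.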
7418 VkResult VmaAllocator_T::AllocateDedicatedMemory(
7419  VkDeviceSize size,
7420  VmaSuballocationType suballocType,
7421  uint32_t memTypeIndex,
7422  bool map,
7423  bool isUserDataString,
7424  void* pUserData,
7425  VkBuffer dedicatedBuffer,
7426  VkImage dedicatedImage,
7427  VmaAllocation* pAllocation)
7428 {
7429  VMA_ASSERT(pAllocation);
7430 
7431  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
7432  allocInfo.memoryTypeIndex = memTypeIndex;
7433  allocInfo.allocationSize = size;
7434 
7435  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
7436  if(m_UseKhrDedicatedAllocation)
7437  {
7438  if(dedicatedBuffer != VK_NULL_HANDLE)
7439  {
7440  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
7441  dedicatedAllocInfo.buffer = dedicatedBuffer;
7442  allocInfo.pNext = &dedicatedAllocInfo;
7443  }
7444  else if(dedicatedImage != VK_NULL_HANDLE)
7445  {
7446  dedicatedAllocInfo.image = dedicatedImage;
7447  allocInfo.pNext = &dedicatedAllocInfo;
7448  }
7449  }
7450 
7451  // Allocate VkDeviceMemory.
7452  VkDeviceMemory hMemory = VK_NULL_HANDLE;
7453  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
7454  if(res < 0)
7455  {
7456  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
7457  return res;
7458  }
7459 
7460  void* pMappedData = VMA_NULL;
7461  if(map)
7462  {
7463  res = (*m_VulkanFunctions.vkMapMemory)(
7464  m_hDevice,
7465  hMemory,
7466  0,
7467  VK_WHOLE_SIZE,
7468  0,
7469  &pMappedData);
7470  if(res < 0)
7471  {
7472  VMA_DEBUG_LOG(" vkMapMemory FAILED");
7473  FreeVulkanMemory(memTypeIndex, size, hMemory);
7474  return res;
7475  }
7476  }
7477 
7478  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
7479  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
7480  (*pAllocation)->SetUserData(this, pUserData);
7481 
7482  // Register it in m_pDedicatedAllocations.
7483  {
7484  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
7485  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
7486  VMA_ASSERT(pDedicatedAllocations);
7487  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
7488  }
7489 
7490  VMA_DEBUG_LOG(" Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);
7491 
7492  return VK_SUCCESS;
7493 }
7494 
7495 void VmaAllocator_T::GetBufferMemoryRequirements(
7496  VkBuffer hBuffer,
7497  VkMemoryRequirements& memReq,
7498  bool& requiresDedicatedAllocation,
7499  bool& prefersDedicatedAllocation) const
7500 {
7501  if(m_UseKhrDedicatedAllocation)
7502  {
7503  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
7504  memReqInfo.buffer = hBuffer;
7505 
7506  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
7507 
7508  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
7509  memReq2.pNext = &memDedicatedReq;
7510 
7511  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
7512 
7513  memReq = memReq2.memoryRequirements;
7514  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
7515  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
7516  }
7517  else
7518  {
7519  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
7520  requiresDedicatedAllocation = false;
7521  prefersDedicatedAllocation = false;
7522  }
7523 }
7524 
7525 void VmaAllocator_T::GetImageMemoryRequirements(
7526  VkImage hImage,
7527  VkMemoryRequirements& memReq,
7528  bool& requiresDedicatedAllocation,
7529  bool& prefersDedicatedAllocation) const
7530 {
7531  if(m_UseKhrDedicatedAllocation)
7532  {
7533  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
7534  memReqInfo.image = hImage;
7535 
7536  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
7537 
7538  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
7539  memReq2.pNext = &memDedicatedReq;
7540 
7541  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
7542 
7543  memReq = memReq2.memoryRequirements;
7544  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
7545  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
7546  }
7547  else
7548  {
7549  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
7550  requiresDedicatedAllocation = false;
7551  prefersDedicatedAllocation = false;
7552  }
7553 }
7554 
7555 VkResult VmaAllocator_T::AllocateMemory(
7556  const VkMemoryRequirements& vkMemReq,
7557  bool requiresDedicatedAllocation,
7558  bool prefersDedicatedAllocation,
7559  VkBuffer dedicatedBuffer,
7560  VkImage dedicatedImage,
7561  const VmaAllocationCreateInfo& createInfo,
7562  VmaSuballocationType suballocType,
7563  VmaAllocation* pAllocation)
7564 {
7565  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
7566  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7567  {
7568  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
7569  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7570  }
7571  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
7572  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
7573  {
7574  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
7575  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7576  }
7577  if(requiresDedicatedAllocation)
7578  {
7579  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7580  {
7581  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
7582  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7583  }
7584  if(createInfo.pool != VK_NULL_HANDLE)
7585  {
7586  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
7587  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7588  }
7589  }
7590  if((createInfo.pool != VK_NULL_HANDLE) &&
7591  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
7592  {
7593  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
7594  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7595  }
7596 
7597  if(createInfo.pool != VK_NULL_HANDLE)
7598  {
7599  return createInfo.pool->m_BlockVector.Allocate(
7600  createInfo.pool,
7601  m_CurrentFrameIndex.load(),
7602  vkMemReq,
7603  createInfo,
7604  suballocType,
7605  pAllocation);
7606  }
7607  else
7608  {
7609  // Bit mask of Vulkan memory types acceptable for this allocation.
7610  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
7611  uint32_t memTypeIndex = UINT32_MAX;
7612  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
7613  if(res == VK_SUCCESS)
7614  {
7615  res = AllocateMemoryOfType(
7616  vkMemReq,
7617  requiresDedicatedAllocation || prefersDedicatedAllocation,
7618  dedicatedBuffer,
7619  dedicatedImage,
7620  createInfo,
7621  memTypeIndex,
7622  suballocType,
7623  pAllocation);
7624  // Succeeded on first try.
7625  if(res == VK_SUCCESS)
7626  {
7627  return res;
7628  }
7629  // Allocation from this memory type failed. Try other compatible memory types.
7630  else
7631  {
7632  for(;;)
7633  {
7634  // Remove old memTypeIndex from list of possibilities.
7635  memoryTypeBits &= ~(1u << memTypeIndex);
7636  // Find alternative memTypeIndex.
7637  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
7638  if(res == VK_SUCCESS)
7639  {
7640  res = AllocateMemoryOfType(
7641  vkMemReq,
7642  requiresDedicatedAllocation || prefersDedicatedAllocation,
7643  dedicatedBuffer,
7644  dedicatedImage,
7645  createInfo,
7646  memTypeIndex,
7647  suballocType,
7648  pAllocation);
7649  // Allocation from this alternative memory type succeeded.
7650  if(res == VK_SUCCESS)
7651  {
7652  return res;
7653  }
7654  // else: Allocation from this memory type failed. Try next one - next loop iteration.
7655  }
7656  // No other matching memory type index could be found.
7657  else
7658  {
7659  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
7660  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7661  }
7662  }
7663  }
7664  }
7665  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
7666  else
7667  return res;
7668  }
7669 }
7670 
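// A sketch of the fallback loop above on hypothetical data: suppose
// vkMemReq.memoryTypeBits = 0b1011 and vmaFindMemoryTypeIndex() first selects
// index 1. If AllocateMemoryOfType() fails there, bit 1 is cleared
// (memoryTypeBits = 0b1001) and the search continues over indices 0 and 3,
// until either an allocation succeeds or no compatible type remains and
// VK_ERROR_OUT_OF_DEVICE_MEMORY is returned.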
7671 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
7672 {
7673  VMA_ASSERT(allocation);
7674 
7675  if(allocation->CanBecomeLost() == false ||
7676  allocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
7677  {
7678  switch(allocation->GetType())
7679  {
7680  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
7681  {
7682  VmaBlockVector* pBlockVector = VMA_NULL;
7683  VmaPool hPool = allocation->GetPool();
7684  if(hPool != VK_NULL_HANDLE)
7685  {
7686  pBlockVector = &hPool->m_BlockVector;
7687  }
7688  else
7689  {
7690  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
7691  pBlockVector = m_pBlockVectors[memTypeIndex];
7692  }
7693  pBlockVector->Free(allocation);
7694  }
7695  break;
7696  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
7697  FreeDedicatedMemory(allocation);
7698  break;
7699  default:
7700  VMA_ASSERT(0);
7701  }
7702  }
7703 
7704  allocation->SetUserData(this, VMA_NULL);
7705  vma_delete(this, allocation);
7706 }
7707 
7708 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
7709 {
7710  // Initialize.
7711  InitStatInfo(pStats->total);
7712  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
7713  InitStatInfo(pStats->memoryType[i]);
7714  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
7715  InitStatInfo(pStats->memoryHeap[i]);
7716 
7717  // Process default pools.
7718  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7719  {
7720  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
7721  VMA_ASSERT(pBlockVector);
7722  pBlockVector->AddStats(pStats);
7723  }
7724 
7725  // Process custom pools.
7726  {
7727  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7728  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
7729  {
7730  m_Pools[poolIndex]->GetBlockVector().AddStats(pStats);
7731  }
7732  }
7733 
7734  // Process dedicated allocations.
7735  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7736  {
7737  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
7738  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
7739  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
7740  VMA_ASSERT(pDedicatedAllocVector);
7741  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
7742  {
7743  VmaStatInfo allocationStatInfo;
7744  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
7745  VmaAddStatInfo(pStats->total, allocationStatInfo);
7746  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
7747  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
7748  }
7749  }
7750 
7751  // Postprocess.
7752  VmaPostprocessCalcStatInfo(pStats->total);
7753  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
7754  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
7755  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
7756  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
7757 }
7758 
7759 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
7760 
7761 VkResult VmaAllocator_T::Defragment(
7762  VmaAllocation* pAllocations,
7763  size_t allocationCount,
7764  VkBool32* pAllocationsChanged,
7765  const VmaDefragmentationInfo* pDefragmentationInfo,
7766  VmaDefragmentationStats* pDefragmentationStats)
7767 {
7768  if(pAllocationsChanged != VMA_NULL)
7769  {
7770  memset(pAllocationsChanged, 0, allocationCount * sizeof(*pAllocationsChanged));
7771  }
7772  if(pDefragmentationStats != VMA_NULL)
7773  {
7774  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
7775  }
7776 
7777  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
7778 
7779  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
7780 
7781  const size_t poolCount = m_Pools.size();
7782 
7783  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
7784  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
7785  {
7786  VmaAllocation hAlloc = pAllocations[allocIndex];
7787  VMA_ASSERT(hAlloc);
7788  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
7789  // DedicatedAlloc cannot be defragmented.
7790  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
7791  // Only HOST_VISIBLE memory types can be defragmented.
7792  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) &&
7793  // Lost allocation cannot be defragmented.
7794  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
7795  {
7796  VmaBlockVector* pAllocBlockVector = VMA_NULL;
7797 
7798  const VmaPool hAllocPool = hAlloc->GetPool();
7799  // This allocation belongs to a custom pool.
7800  if(hAllocPool != VK_NULL_HANDLE)
7801  {
7802  pAllocBlockVector = &hAllocPool->GetBlockVector();
7803  }
7804  // This allocation belongs to the general pool.
7805  else
7806  {
7807  pAllocBlockVector = m_pBlockVectors[memTypeIndex];
7808  }
7809 
7810  VmaDefragmentator* const pDefragmentator = pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
7811 
7812  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
7813  &pAllocationsChanged[allocIndex] : VMA_NULL;
7814  pDefragmentator->AddAllocation(hAlloc, pChanged);
7815  }
7816  }
7817 
7818  VkResult result = VK_SUCCESS;
7819 
7820  // ======== Main processing.
7821 
7822  VkDeviceSize maxBytesToMove = SIZE_MAX;
7823  uint32_t maxAllocationsToMove = UINT32_MAX;
7824  if(pDefragmentationInfo != VMA_NULL)
7825  {
7826  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
7827  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
7828  }
7829 
7830  // Process standard memory.
7831  for(uint32_t memTypeIndex = 0;
7832  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
7833  ++memTypeIndex)
7834  {
7835  // Only HOST_VISIBLE memory types can be defragmented.
7836  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
7837  {
7838  result = m_pBlockVectors[memTypeIndex]->Defragment(
7839  pDefragmentationStats,
7840  maxBytesToMove,
7841  maxAllocationsToMove);
7842  }
7843  }
7844 
7845  // Process custom pools.
7846  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
7847  {
7848  result = m_Pools[poolIndex]->GetBlockVector().Defragment(
7849  pDefragmentationStats,
7850  maxBytesToMove,
7851  maxAllocationsToMove);
7852  }
7853 
7854  // ======== Destroy defragmentators.
7855 
7856  // Process custom pools.
7857  for(size_t poolIndex = poolCount; poolIndex--; )
7858  {
7859  m_Pools[poolIndex]->GetBlockVector().DestroyDefragmentator();
7860  }
7861 
7862  // Process standard memory.
7863  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
7864  {
7865  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
7866  {
7867  m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
7868  }
7869  }
7870 
7871  return result;
7872 }
7873 
7874 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
7875 {
7876  if(hAllocation->CanBecomeLost())
7877  {
7878  /*
7879  Warning: This is a carefully designed algorithm.
7880  Do not modify unless you really know what you're doing :)
7881  */
7882  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
7883  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
7884  for(;;)
7885  {
7886  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
7887  {
7888  pAllocationInfo->memoryType = UINT32_MAX;
7889  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
7890  pAllocationInfo->offset = 0;
7891  pAllocationInfo->size = hAllocation->GetSize();
7892  pAllocationInfo->pMappedData = VMA_NULL;
7893  pAllocationInfo->pUserData = hAllocation->GetUserData();
7894  return;
7895  }
7896  else if(localLastUseFrameIndex == localCurrFrameIndex)
7897  {
7898  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
7899  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
7900  pAllocationInfo->offset = hAllocation->GetOffset();
7901  pAllocationInfo->size = hAllocation->GetSize();
7902  pAllocationInfo->pMappedData = VMA_NULL;
7903  pAllocationInfo->pUserData = hAllocation->GetUserData();
7904  return;
7905  }
7906  else // Last use time earlier than current time.
7907  {
7908  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
7909  {
7910  localLastUseFrameIndex = localCurrFrameIndex;
7911  }
7912  }
7913  }
7914  }
7915  else
7916  {
7917  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
7918  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
7919  pAllocationInfo->offset = hAllocation->GetOffset();
7920  pAllocationInfo->size = hAllocation->GetSize();
7921  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
7922  pAllocationInfo->pUserData = hAllocation->GetUserData();
7923  }
7924 }
7925 
7926 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
7927 {
7928  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
7929  if(hAllocation->CanBecomeLost())
7930  {
7931  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
7932  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
7933  for(;;)
7934  {
7935  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
7936  {
7937  return false;
7938  }
7939  else if(localLastUseFrameIndex == localCurrFrameIndex)
7940  {
7941  return true;
7942  }
7943  else // Last use time earlier than current time.
7944  {
7945  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
7946  {
7947  localLastUseFrameIndex = localCurrFrameIndex;
7948  }
7949  }
7950  }
7951  }
7952  else
7953  {
7954  return true;
7955  }
7956 }
7957 
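// The loop above is a compare-and-swap retry. Each iteration either observes
// a terminal state (lost -> false, already touched this frame -> true) or
// tries to advance the last-use frame index to the current frame; when the
// exchange fails because another thread advanced the index first, the helper
// reloads the fresh value and the loop re-evaluates. Every iteration thus
// returns or strictly advances localLastUseFrameIndex, so the loop terminates.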
7958 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
7959 {
7960  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u", pCreateInfo->memoryTypeIndex);
7961 
7962  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
7963 
7964  if(newCreateInfo.maxBlockCount == 0)
7965  {
7966  newCreateInfo.maxBlockCount = SIZE_MAX;
7967  }
7968  if(newCreateInfo.blockSize == 0)
7969  {
7970  newCreateInfo.blockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
7971  }
7972 
7973  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo);
7974 
7975  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
7976  if(res != VK_SUCCESS)
7977  {
7978  vma_delete(this, *pPool);
7979  *pPool = VMA_NULL;
7980  return res;
7981  }
7982 
7983  // Add to m_Pools.
7984  {
7985  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7986  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
7987  }
7988 
7989  return VK_SUCCESS;
7990 }
7991 
7992 void VmaAllocator_T::DestroyPool(VmaPool pool)
7993 {
7994  // Remove from m_Pools.
7995  {
7996  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7997  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
7998  VMA_ASSERT(success && "Pool not found in Allocator.");
7999  }
8000 
8001  vma_delete(this, pool);
8002 }
8003 
8004 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
8005 {
8006  pool->m_BlockVector.GetPoolStats(pPoolStats);
8007 }
8008 
8009 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
8010 {
8011  m_CurrentFrameIndex.store(frameIndex);
8012 }
8013 
8014 void VmaAllocator_T::MakePoolAllocationsLost(
8015  VmaPool hPool,
8016  size_t* pLostAllocationCount)
8017 {
8018  hPool->m_BlockVector.MakePoolAllocationsLost(
8019  m_CurrentFrameIndex.load(),
8020  pLostAllocationCount);
8021 }
8022 
8023 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
8024 {
8025  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
8026  (*pAllocation)->InitLost();
8027 }
8028 
8029 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
8030 {
8031  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
8032 
8033  VkResult res;
8034  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
8035  {
8036  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
8037  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
8038  {
8039  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
8040  if(res == VK_SUCCESS)
8041  {
8042  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
8043  }
8044  }
8045  else
8046  {
8047  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
8048  }
8049  }
8050  else
8051  {
8052  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
8053  }
8054 
8055  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
8056  {
8057  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
8058  }
8059 
8060  return res;
8061 }
8062 
8063 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
8064 {
8065  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
8066  {
8067  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
8068  }
8069 
8070  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
8071 
8072  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
8073  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
8074  {
8075  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
8076  m_HeapSizeLimit[heapIndex] += size;
8077  }
8078 }
8079 
8080 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
8081 {
8082  if(hAllocation->CanBecomeLost())
8083  {
8084  return VK_ERROR_MEMORY_MAP_FAILED;
8085  }
8086 
8087  switch(hAllocation->GetType())
8088  {
8089  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
8090  {
8091  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
8092  char *pBytes = VMA_NULL;
8093  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
8094  if(res == VK_SUCCESS)
8095  {
8096  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
8097  hAllocation->BlockAllocMap();
8098  }
8099  return res;
8100  }
8101  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
8102  return hAllocation->DedicatedAllocMap(this, ppData);
8103  default:
8104  VMA_ASSERT(0);
8105  return VK_ERROR_MEMORY_MAP_FAILED;
8106  }
8107 }
8108 
8109 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
8110 {
8111  switch(hAllocation->GetType())
8112  {
8113  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
8114  {
8115  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
8116  hAllocation->BlockAllocUnmap();
8117  pBlock->Unmap(this, 1);
8118  }
8119  break;
8120  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
8121  hAllocation->DedicatedAllocUnmap(this);
8122  break;
8123  default:
8124  VMA_ASSERT(0);
8125  }
8126 }
8127 
8128 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
8129 {
8130  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
8131 
8132  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
8133  {
8134  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
8135  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
8136  VMA_ASSERT(pDedicatedAllocations);
8137  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
8138  VMA_ASSERT(success);
8139  }
8140 
8141  VkDeviceMemory hMemory = allocation->GetMemory();
8142 
8143  if(allocation->GetMappedData() != VMA_NULL)
8144  {
8145  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
8146  }
8147 
8148  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
8149 
8150  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
8151 }
8152 
8153 #if VMA_STATS_STRING_ENABLED
8154 
8155 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
8156 {
8157  bool dedicatedAllocationsStarted = false;
8158  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
8159  {
8160  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
8161  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
8162  VMA_ASSERT(pDedicatedAllocVector);
8163  if(pDedicatedAllocVector->empty() == false)
8164  {
8165  if(dedicatedAllocationsStarted == false)
8166  {
8167  dedicatedAllocationsStarted = true;
8168  json.WriteString("DedicatedAllocations");
8169  json.BeginObject();
8170  }
8171 
8172  json.BeginString("Type ");
8173  json.ContinueString(memTypeIndex);
8174  json.EndString();
8175 
8176  json.BeginArray();
8177 
8178  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
8179  {
8180  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
8181  json.BeginObject(true);
8182 
8183  json.WriteString("Type");
8184  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[hAlloc->GetSuballocationType()]);
8185 
8186  json.WriteString("Size");
8187  json.WriteNumber(hAlloc->GetSize());
8188 
8189  const void* pUserData = hAlloc->GetUserData();
8190  if(pUserData != VMA_NULL)
8191  {
8192  json.WriteString("UserData");
8193  if(hAlloc->IsUserDataString())
8194  {
8195  json.WriteString((const char*)pUserData);
8196  }
8197  else
8198  {
8199  json.BeginString();
8200  json.ContinueString_Pointer(pUserData);
8201  json.EndString();
8202  }
8203  }
8204 
8205  json.EndObject();
8206  }
8207 
8208  json.EndArray();
8209  }
8210  }
8211  if(dedicatedAllocationsStarted)
8212  {
8213  json.EndObject();
8214  }
8215 
8216  {
8217  bool allocationsStarted = false;
8218  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
8219  {
8220  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
8221  {
8222  if(allocationsStarted == false)
8223  {
8224  allocationsStarted = true;
8225  json.WriteString("DefaultPools");
8226  json.BeginObject();
8227  }
8228 
8229  json.BeginString("Type ");
8230  json.ContinueString(memTypeIndex);
8231  json.EndString();
8232 
8233  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
8234  }
8235  }
8236  if(allocationsStarted)
8237  {
8238  json.EndObject();
8239  }
8240  }
8241 
8242  {
8243  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
8244  const size_t poolCount = m_Pools.size();
8245  if(poolCount > 0)
8246  {
8247  json.WriteString("Pools");
8248  json.BeginArray();
8249  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
8250  {
8251  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
8252  }
8253  json.EndArray();
8254  }
8255  }
8256 }
8257 
8258 #endif // #if VMA_STATS_STRING_ENABLED
8259 
8260 static VkResult AllocateMemoryForImage(
8261  VmaAllocator allocator,
8262  VkImage image,
8263  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8264  VmaSuballocationType suballocType,
8265  VmaAllocation* pAllocation)
8266 {
8267  VMA_ASSERT(allocator && (image != VK_NULL_HANDLE) && pAllocationCreateInfo && pAllocation);
8268 
8269  VkMemoryRequirements vkMemReq = {};
8270  bool requiresDedicatedAllocation = false;
8271  bool prefersDedicatedAllocation = false;
8272  allocator->GetImageMemoryRequirements(image, vkMemReq,
8273  requiresDedicatedAllocation, prefersDedicatedAllocation);
8274 
8275  return allocator->AllocateMemory(
8276  vkMemReq,
8277  requiresDedicatedAllocation,
8278  prefersDedicatedAllocation,
8279  VK_NULL_HANDLE, // dedicatedBuffer
8280  image, // dedicatedImage
8281  *pAllocationCreateInfo,
8282  suballocType,
8283  pAllocation);
8284 }
8285 
8286 ////////////////////////////////////////////////////////////////////////////////
8287 // Public interface
8288 
8289 VkResult vmaCreateAllocator(
8290  const VmaAllocatorCreateInfo* pCreateInfo,
8291  VmaAllocator* pAllocator)
8292 {
8293  VMA_ASSERT(pCreateInfo && pAllocator);
8294  VMA_DEBUG_LOG("vmaCreateAllocator");
8295  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
8296  return VK_SUCCESS;
8297 }
8298 
8299 void vmaDestroyAllocator(
8300  VmaAllocator allocator)
8301 {
8302  if(allocator != VK_NULL_HANDLE)
8303  {
8304  VMA_DEBUG_LOG("vmaDestroyAllocator");
8305  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
8306  vma_delete(&allocationCallbacks, allocator);
8307  }
8308 }
8309 
8310 void vmaGetPhysicalDeviceProperties(
8311  VmaAllocator allocator,
8312  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
8313 {
8314  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
8315  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
8316 }
8317 
8318 void vmaGetMemoryProperties(
8319  VmaAllocator allocator,
8320  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
8321 {
8322  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
8323  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
8324 }
8325 
8326 void vmaGetMemoryTypeProperties(
8327  VmaAllocator allocator,
8328  uint32_t memoryTypeIndex,
8329  VkMemoryPropertyFlags* pFlags)
8330 {
8331  VMA_ASSERT(allocator && pFlags);
8332  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
8333  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
8334 }
8335 
8336 void vmaSetCurrentFrameIndex(
8337  VmaAllocator allocator,
8338  uint32_t frameIndex)
8339 {
8340  VMA_ASSERT(allocator);
8341  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
8342 
8343  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8344 
8345  allocator->SetCurrentFrameIndex(frameIndex);
8346 }
8347 
8348 void vmaCalculateStats(
8349  VmaAllocator allocator,
8350  VmaStats* pStats)
8351 {
8352  VMA_ASSERT(allocator && pStats);
8353  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8354  allocator->CalculateStats(pStats);
8355 }
8356 
8357 #if VMA_STATS_STRING_ENABLED
8358 
8359 void vmaBuildStatsString(
8360  VmaAllocator allocator,
8361  char** ppStatsString,
8362  VkBool32 detailedMap)
8363 {
8364  VMA_ASSERT(allocator && ppStatsString);
8365  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8366 
8367  VmaStringBuilder sb(allocator);
8368  {
8369  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
8370  json.BeginObject();
8371 
8372  VmaStats stats;
8373  allocator->CalculateStats(&stats);
8374 
8375  json.WriteString("Total");
8376  VmaPrintStatInfo(json, stats.total);
8377 
8378  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
8379  {
8380  json.BeginString("Heap ");
8381  json.ContinueString(heapIndex);
8382  json.EndString();
8383  json.BeginObject();
8384 
8385  json.WriteString("Size");
8386  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
8387 
8388  json.WriteString("Flags");
8389  json.BeginArray(true);
8390  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
8391  {
8392  json.WriteString("DEVICE_LOCAL");
8393  }
8394  json.EndArray();
8395 
8396  if(stats.memoryHeap[heapIndex].blockCount > 0)
8397  {
8398  json.WriteString("Stats");
8399  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
8400  }
8401 
8402  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
8403  {
8404  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
8405  {
8406  json.BeginString("Type ");
8407  json.ContinueString(typeIndex);
8408  json.EndString();
8409 
8410  json.BeginObject();
8411 
8412  json.WriteString("Flags");
8413  json.BeginArray(true);
8414  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
8415  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
8416  {
8417  json.WriteString("DEVICE_LOCAL");
8418  }
8419  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
8420  {
8421  json.WriteString("HOST_VISIBLE");
8422  }
8423  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
8424  {
8425  json.WriteString("HOST_COHERENT");
8426  }
8427  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
8428  {
8429  json.WriteString("HOST_CACHED");
8430  }
8431  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
8432  {
8433  json.WriteString("LAZILY_ALLOCATED");
8434  }
8435  json.EndArray();
8436 
8437  if(stats.memoryType[typeIndex].blockCount > 0)
8438  {
8439  json.WriteString("Stats");
8440  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
8441  }
8442 
8443  json.EndObject();
8444  }
8445  }
8446 
8447  json.EndObject();
8448  }
8449  if(detailedMap == VK_TRUE)
8450  {
8451  allocator->PrintDetailedMap(json);
8452  }
8453 
8454  json.EndObject();
8455  }
8456 
8457  const size_t len = sb.GetLength();
8458  char* const pChars = vma_new_array(allocator, char, len + 1);
8459  if(len > 0)
8460  {
8461  memcpy(pChars, sb.GetData(), len);
8462  }
8463  pChars[len] = '\0';
8464  *ppStatsString = pChars;
8465 }
8466 
8467 void vmaFreeStatsString(
8468  VmaAllocator allocator,
8469  char* pStatsString)
8470 {
8471  if(pStatsString != VMA_NULL)
8472  {
8473  VMA_ASSERT(allocator);
8474  size_t len = strlen(pStatsString);
8475  vma_delete_array(allocator, pStatsString, len + 1);
8476  }
8477 }
8478 
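// A usage sketch for the two functions above (the helper name is illustrative;
// requires VMA_STATS_STRING_ENABLED and <cstdio> for printf): build the JSON
// report, consume it, then return the string to the allocator that owns it.
static void vmaExamplePrintStats(VmaAllocator allocator)
{
    char* pStatsString = VMA_NULL;
    vmaBuildStatsString(allocator, &pStatsString, VK_TRUE); // VK_TRUE = include detailed map
    printf("%s\n", pStatsString);
    vmaFreeStatsString(allocator, pStatsString);
}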
8479 #endif // #if VMA_STATS_STRING_ENABLED
8480 
8481 /*
8482 This function is not protected by any mutex because it just reads immutable data.
8483 */
8484 VkResult vmaFindMemoryTypeIndex(
8485  VmaAllocator allocator,
8486  uint32_t memoryTypeBits,
8487  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8488  uint32_t* pMemoryTypeIndex)
8489 {
8490  VMA_ASSERT(allocator != VK_NULL_HANDLE);
8491  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
8492  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
8493 
8494  if(pAllocationCreateInfo->memoryTypeBits != 0)
8495  {
8496  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
8497  }
8498 
8499  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
8500  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
8501 
8502  // Convert usage to requiredFlags and preferredFlags.
8503  switch(pAllocationCreateInfo->usage)
8504  {
8505  case VMA_MEMORY_USAGE_UNKNOWN:
8506  break;
8507  case VMA_MEMORY_USAGE_GPU_ONLY:
8508  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
8509  break;
8510  case VMA_MEMORY_USAGE_CPU_ONLY:
8511  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
8512  break;
8513  case VMA_MEMORY_USAGE_CPU_TO_GPU:
8514  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
8515  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
8516  break;
8517  case VMA_MEMORY_USAGE_GPU_TO_CPU:
8518  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
8519  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
8520  break;
8521  default:
8522  break;
8523  }
8524 
8525  *pMemoryTypeIndex = UINT32_MAX;
8526  uint32_t minCost = UINT32_MAX;
8527  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
8528  memTypeIndex < allocator->GetMemoryTypeCount();
8529  ++memTypeIndex, memTypeBit <<= 1)
8530  {
8531  // This memory type is acceptable according to memoryTypeBits bitmask.
8532  if((memTypeBit & memoryTypeBits) != 0)
8533  {
8534  const VkMemoryPropertyFlags currFlags =
8535  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
8536  // This memory type contains requiredFlags.
8537  if((requiredFlags & ~currFlags) == 0)
8538  {
8539  // Calculate cost as number of bits from preferredFlags not present in this memory type.
8540  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
8541  // Remember memory type with lowest cost.
8542  if(currCost < minCost)
8543  {
8544  *pMemoryTypeIndex = memTypeIndex;
8545  if(currCost == 0)
8546  {
8547  return VK_SUCCESS;
8548  }
8549  minCost = currCost;
8550  }
8551  }
8552  }
8553  }
8554  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
8555 }
8556 
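// A usage sketch for vmaFindMemoryTypeIndex() above. The helper name and the
// choice of VMA_MEMORY_USAGE_CPU_ONLY are illustrative, not part of the
// library: pick a HOST_VISIBLE memory type suitable for a staging buffer.
static VkResult vmaExampleFindStagingMemoryType(
    VmaAllocator allocator,
    uint32_t* pMemTypeIndex)
{
    VmaAllocationCreateInfo allocCreateInfo = {};
    // CPU_ONLY maps to required HOST_VISIBLE | HOST_COHERENT (see the switch above).
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
    // UINT32_MAX = consider all memory types; real callers pass
    // VkMemoryRequirements::memoryTypeBits instead.
    return vmaFindMemoryTypeIndex(allocator, UINT32_MAX, &allocCreateInfo, pMemTypeIndex);
}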
8557 VkResult vmaFindMemoryTypeIndexForBufferInfo(
8558  VmaAllocator allocator,
8559  const VkBufferCreateInfo* pBufferCreateInfo,
8560  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8561  uint32_t* pMemoryTypeIndex)
8562 {
8563  VMA_ASSERT(allocator != VK_NULL_HANDLE);
8564  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
8565  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
8566  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
8567 
8568  const VkDevice hDev = allocator->m_hDevice;
8569  VkBuffer hBuffer = VK_NULL_HANDLE;
8570  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
8571  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
8572  if(res == VK_SUCCESS)
8573  {
8574  VkMemoryRequirements memReq = {};
8575  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
8576  hDev, hBuffer, &memReq);
8577 
8578  res = vmaFindMemoryTypeIndex(
8579  allocator,
8580  memReq.memoryTypeBits,
8581  pAllocationCreateInfo,
8582  pMemoryTypeIndex);
8583 
8584  allocator->GetVulkanFunctions().vkDestroyBuffer(
8585  hDev, hBuffer, allocator->GetAllocationCallbacks());
8586  }
8587  return res;
8588 }
8589 
8590 VkResult vmaFindMemoryTypeIndexForImageInfo(
8591  VmaAllocator allocator,
8592  const VkImageCreateInfo* pImageCreateInfo,
8593  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8594  uint32_t* pMemoryTypeIndex)
8595 {
8596  VMA_ASSERT(allocator != VK_NULL_HANDLE);
8597  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
8598  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
8599  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
8600 
8601  const VkDevice hDev = allocator->m_hDevice;
8602  VkImage hImage = VK_NULL_HANDLE;
8603  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
8604  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
8605  if(res == VK_SUCCESS)
8606  {
8607  VkMemoryRequirements memReq = {};
8608  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
8609  hDev, hImage, &memReq);
8610 
8611  res = vmaFindMemoryTypeIndex(
8612  allocator,
8613  memReq.memoryTypeBits,
8614  pAllocationCreateInfo,
8615  pMemoryTypeIndex);
8616 
8617  allocator->GetVulkanFunctions().vkDestroyImage(
8618  hDev, hImage, allocator->GetAllocationCallbacks());
8619  }
8620  return res;
8621 }
8622 
8623 VkResult vmaCreatePool(
8624  VmaAllocator allocator,
8625  const VmaPoolCreateInfo* pCreateInfo,
8626  VmaPool* pPool)
8627 {
8628  VMA_ASSERT(allocator && pCreateInfo && pPool);
8629 
8630  VMA_DEBUG_LOG("vmaCreatePool");
8631 
8632  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8633 
8634  return allocator->CreatePool(pCreateInfo, pPool);
8635 }
8636 
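// A usage sketch for vmaCreatePool() above (the helper and values are
// illustrative): zero blockSize / maxBlockCount fall back to the defaults
// applied in VmaAllocator_T::CreatePool(). Allocations then target the pool
// via VmaAllocationCreateInfo::pool.
static VkResult vmaExampleCreatePool(
    VmaAllocator allocator,
    uint32_t memTypeIndex,
    VmaPool* pPool)
{
    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex; // e.g. from vmaFindMemoryTypeIndex()
    poolCreateInfo.blockSize = 0;                  // 0 = CalcPreferredBlockSize() default
    poolCreateInfo.maxBlockCount = 0;              // 0 = unlimited (becomes SIZE_MAX)
    return vmaCreatePool(allocator, &poolCreateInfo, pPool);
}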
8637 void vmaDestroyPool(
8638  VmaAllocator allocator,
8639  VmaPool pool)
8640 {
8641  VMA_ASSERT(allocator);
8642 
8643  if(pool == VK_NULL_HANDLE)
8644  {
8645  return;
8646  }
8647 
8648  VMA_DEBUG_LOG("vmaDestroyPool");
8649 
8650  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8651 
8652  allocator->DestroyPool(pool);
8653 }
8654 
8655 void vmaGetPoolStats(
8656  VmaAllocator allocator,
8657  VmaPool pool,
8658  VmaPoolStats* pPoolStats)
8659 {
8660  VMA_ASSERT(allocator && pool && pPoolStats);
8661 
8662  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8663 
8664  allocator->GetPoolStats(pool, pPoolStats);
8665 }
8666 
8667 void vmaMakePoolAllocationsLost(
8668  VmaAllocator allocator,
8669  VmaPool pool,
8670  size_t* pLostAllocationCount)
8671 {
8672  VMA_ASSERT(allocator && pool);
8673 
8674  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8675 
8676  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
8677 }
8678 
8679 VkResult vmaAllocateMemory(
8680  VmaAllocator allocator,
8681  const VkMemoryRequirements* pVkMemoryRequirements,
8682  const VmaAllocationCreateInfo* pCreateInfo,
8683  VmaAllocation* pAllocation,
8684  VmaAllocationInfo* pAllocationInfo)
8685 {
8686  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
8687 
8688  VMA_DEBUG_LOG("vmaAllocateMemory");
8689 
8690  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8691 
8692  VkResult result = allocator->AllocateMemory(
8693  *pVkMemoryRequirements,
8694  false, // requiresDedicatedAllocation
8695  false, // prefersDedicatedAllocation
8696  VK_NULL_HANDLE, // dedicatedBuffer
8697  VK_NULL_HANDLE, // dedicatedImage
8698  *pCreateInfo,
8699  VMA_SUBALLOCATION_TYPE_UNKNOWN,
8700  pAllocation);
8701 
8702  if(pAllocationInfo && result == VK_SUCCESS)
8703  {
8704  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8705  }
8706 
8707  return result;
8708 }
8709 
8710 VkResult vmaAllocateMemoryForBuffer(
8711  VmaAllocator allocator,
8712  VkBuffer buffer,
8713  const VmaAllocationCreateInfo* pCreateInfo,
8714  VmaAllocation* pAllocation,
8715  VmaAllocationInfo* pAllocationInfo)
8716 {
8717  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
8718 
8719  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
8720 
8721  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8722 
8723  VkMemoryRequirements vkMemReq = {};
8724  bool requiresDedicatedAllocation = false;
8725  bool prefersDedicatedAllocation = false;
8726  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
8727  requiresDedicatedAllocation,
8728  prefersDedicatedAllocation);
8729 
8730  VkResult result = allocator->AllocateMemory(
8731  vkMemReq,
8732  requiresDedicatedAllocation,
8733  prefersDedicatedAllocation,
8734  buffer, // dedicatedBuffer
8735  VK_NULL_HANDLE, // dedicatedImage
8736  *pCreateInfo,
8737  VMA_SUBALLOCATION_TYPE_BUFFER,
8738  pAllocation);
8739 
8740  if(pAllocationInfo && result == VK_SUCCESS)
8741  {
8742  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8743  }
8744 
8745  return result;
8746 }
8747 
8748 VkResult vmaAllocateMemoryForImage(
8749  VmaAllocator allocator,
8750  VkImage image,
8751  const VmaAllocationCreateInfo* pCreateInfo,
8752  VmaAllocation* pAllocation,
8753  VmaAllocationInfo* pAllocationInfo)
8754 {
8755  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
8756 
8757  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
8758 
8759  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8760 
8761  VkResult result = AllocateMemoryForImage(
8762  allocator,
8763  image,
8764  pCreateInfo,
8765  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
8766  pAllocation);
8767 
8768  if(pAllocationInfo && result == VK_SUCCESS)
8769  {
8770  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8771  }
8772 
8773  return result;
8774 }
8775 
8776 void vmaFreeMemory(
8777  VmaAllocator allocator,
8778  VmaAllocation allocation)
8779 {
8780  VMA_ASSERT(allocator && allocation);
8781 
8782  VMA_DEBUG_LOG("vmaFreeMemory");
8783 
8784  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8785 
8786  allocator->FreeMemory(allocation);
8787 }
8788 
8789 void vmaGetAllocationInfo(
8790  VmaAllocator allocator,
8791  VmaAllocation allocation,
8792  VmaAllocationInfo* pAllocationInfo)
8793 {
8794  VMA_ASSERT(allocator && allocation && pAllocationInfo);
8795 
8796  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8797 
8798  allocator->GetAllocationInfo(allocation, pAllocationInfo);
8799 }
8800 
8801 VkBool32 vmaTouchAllocation(
8802  VmaAllocator allocator,
8803  VmaAllocation allocation)
8804 {
8805  VMA_ASSERT(allocator && allocation);
8806 
8807  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8808 
8809  return allocator->TouchAllocation(allocation);
8810 }
8811 
8812 void vmaSetAllocationUserData(
8813  VmaAllocator allocator,
8814  VmaAllocation allocation,
8815  void* pUserData)
8816 {
8817  VMA_ASSERT(allocator && allocation);
8818 
8819  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8820 
8821  allocation->SetUserData(allocator, pUserData);
8822 }
8823 
8824 void vmaCreateLostAllocation(
8825  VmaAllocator allocator,
8826  VmaAllocation* pAllocation)
8827 {
8828  VMA_ASSERT(allocator && pAllocation);
8829 
8830  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8831 
8832  allocator->CreateLostAllocation(pAllocation);
8833 }
8834 
8835 VkResult vmaMapMemory(
8836  VmaAllocator allocator,
8837  VmaAllocation allocation,
8838  void** ppData)
8839 {
8840  VMA_ASSERT(allocator && allocation && ppData);
8841 
8842  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8843 
8844  return allocator->Map(allocation, ppData);
8845 }
8846 
8847 void vmaUnmapMemory(
8848  VmaAllocator allocator,
8849  VmaAllocation allocation)
8850 {
8851  VMA_ASSERT(allocator && allocation);
8852 
8853  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8854 
8855  allocator->Unmap(allocation);
8856 }
8857 
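// A usage sketch for the vmaMapMemory()/vmaUnmapMemory() pair above (helper
// name and parameters are illustrative): map, copy, unmap. Note that mapping
// fails by design for allocations created with
// VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT, as seen in VmaAllocator_T::Map().
static VkResult vmaExampleUploadData(
    VmaAllocator allocator,
    VmaAllocation allocation,
    const void* pSrcData,
    size_t dataSize)
{
    void* pMappedData = VMA_NULL;
    VkResult res = vmaMapMemory(allocator, allocation, &pMappedData);
    if(res == VK_SUCCESS)
    {
        memcpy(pMappedData, pSrcData, dataSize);
        vmaUnmapMemory(allocator, allocation);
    }
    return res;
}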
8858 VkResult vmaDefragment(
8859  VmaAllocator allocator,
8860  VmaAllocation* pAllocations,
8861  size_t allocationCount,
8862  VkBool32* pAllocationsChanged,
8863  const VmaDefragmentationInfo *pDefragmentationInfo,
8864  VmaDefragmentationStats* pDefragmentationStats)
8865 {
8866  VMA_ASSERT(allocator && pAllocations);
8867 
8868  VMA_DEBUG_LOG("vmaDefragment");
8869 
8870  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8871 
8872  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
8873 }
8874 
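// A usage sketch for vmaDefragment() above (the helper and budgets are
// illustrative). Only block allocations in HOST_VISIBLE memory that are not
// lost can be moved; entries of pAllocationsChanged tell the caller which
// buffers/images must be destroyed, recreated and rebound at the allocation's
// new place.
static VkResult vmaExampleDefragment(
    VmaAllocator allocator,
    VmaAllocation* pAllocations,
    size_t allocationCount,
    VkBool32* pAllocationsChanged)
{
    VmaDefragmentationInfo defragInfo = {};
    defragInfo.maxBytesToMove = 64ull * 1024 * 1024; // per-call move budget
    defragInfo.maxAllocationsToMove = 128;
    return vmaDefragment(allocator, pAllocations, allocationCount,
        pAllocationsChanged, &defragInfo, VMA_NULL); // stats not collected here
}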
8875 VkResult vmaCreateBuffer(
8876  VmaAllocator allocator,
8877  const VkBufferCreateInfo* pBufferCreateInfo,
8878  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8879  VkBuffer* pBuffer,
8880  VmaAllocation* pAllocation,
8881  VmaAllocationInfo* pAllocationInfo)
8882 {
8883  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
8884 
8885  VMA_DEBUG_LOG("vmaCreateBuffer");
8886 
8887  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8888 
8889  *pBuffer = VK_NULL_HANDLE;
8890  *pAllocation = VK_NULL_HANDLE;
8891 
8892  // 1. Create VkBuffer.
8893  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
8894  allocator->m_hDevice,
8895  pBufferCreateInfo,
8896  allocator->GetAllocationCallbacks(),
8897  pBuffer);
8898  if(res >= 0)
8899  {
8900  // 2. vkGetBufferMemoryRequirements.
8901  VkMemoryRequirements vkMemReq = {};
8902  bool requiresDedicatedAllocation = false;
8903  bool prefersDedicatedAllocation = false;
8904  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
8905  requiresDedicatedAllocation, prefersDedicatedAllocation);
8906 
8907  // Make sure alignment requirements for specific buffer usages reported
8908  // in Physical Device Properties are included in alignment reported by memory requirements.
8909  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
8910  {
8911  VMA_ASSERT(vkMemReq.alignment %
8912  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
8913  }
8914  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
8915  {
8916  VMA_ASSERT(vkMemReq.alignment %
8917  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
8918  }
8919  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
8920  {
8921  VMA_ASSERT(vkMemReq.alignment %
8922  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
8923  }
8924 
8925  // 3. Allocate memory using allocator.
8926  res = allocator->AllocateMemory(
8927  vkMemReq,
8928  requiresDedicatedAllocation,
8929  prefersDedicatedAllocation,
8930  *pBuffer, // dedicatedBuffer
8931  VK_NULL_HANDLE, // dedicatedImage
8932  *pAllocationCreateInfo,
8933  VMA_SUBALLOCATION_TYPE_BUFFER,
8934  pAllocation);
8935  if(res >= 0)
8936  {
8937  // 4. Bind buffer with memory.
8938  res = (*allocator->GetVulkanFunctions().vkBindBufferMemory)(
8939  allocator->m_hDevice,
8940  *pBuffer,
8941  (*pAllocation)->GetMemory(),
8942  (*pAllocation)->GetOffset());
8943  if(res >= 0)
8944  {
8945  // All steps succeeded.
8946  if(pAllocationInfo != VMA_NULL)
8947  {
8948  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8949  }
8950  return VK_SUCCESS;
8951  }
8952  allocator->FreeMemory(*pAllocation);
8953  *pAllocation = VK_NULL_HANDLE;
8954  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
8955  *pBuffer = VK_NULL_HANDLE;
8956  return res;
8957  }
8958  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
8959  *pBuffer = VK_NULL_HANDLE;
8960  return res;
8961  }
8962  return res;
8963 }
8964 
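// A usage sketch for vmaCreateBuffer() above (helper name, size and usage
// flags are illustrative): a single call performs the create / allocate /
// bind sequence implemented in steps 1-4 above.
static VkResult vmaExampleCreateVertexBuffer(
    VmaAllocator allocator,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation)
{
    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;
    bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY; // prefer DEVICE_LOCAL memory

    // pAllocationInfo is optional and omitted here.
    return vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
        pBuffer, pAllocation, VMA_NULL);
}
// Destroy later with vmaDestroyBuffer(allocator, buffer, allocation).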
8965 void vmaDestroyBuffer(
8966  VmaAllocator allocator,
8967  VkBuffer buffer,
8968  VmaAllocation allocation)
8969 {
8970  if(buffer != VK_NULL_HANDLE)
8971  {
8972  VMA_ASSERT(allocator);
8973 
8974  VMA_DEBUG_LOG("vmaDestroyBuffer");
8975 
8976  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8977 
8978  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
8979 
8980  allocator->FreeMemory(allocation);
8981  }
8982 }
8983 
8984 VkResult vmaCreateImage(
8985  VmaAllocator allocator,
8986  const VkImageCreateInfo* pImageCreateInfo,
8987  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8988  VkImage* pImage,
8989  VmaAllocation* pAllocation,
8990  VmaAllocationInfo* pAllocationInfo)
8991 {
8992  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
8993 
8994  VMA_DEBUG_LOG("vmaCreateImage");
8995 
8996  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8997 
8998  *pImage = VK_NULL_HANDLE;
8999  *pAllocation = VK_NULL_HANDLE;
9000 
9001  // 1. Create VkImage.
9002  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
9003  allocator->m_hDevice,
9004  pImageCreateInfo,
9005  allocator->GetAllocationCallbacks(),
9006  pImage);
9007  if(res >= 0)
9008  {
9009  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
9010  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
9011  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
9012 
9013  // 2. Allocate memory using allocator.
9014  res = AllocateMemoryForImage(allocator, *pImage, pAllocationCreateInfo, suballocType, pAllocation);
9015  if(res >= 0)
9016  {
9017  // 3. Bind image with memory.
9018  res = (*allocator->GetVulkanFunctions().vkBindImageMemory)(
9019  allocator->m_hDevice,
9020  *pImage,
9021  (*pAllocation)->GetMemory(),
9022  (*pAllocation)->GetOffset());
9023  if(res >= 0)
9024  {
9025  // All steps succeeded.
9026  if(pAllocationInfo != VMA_NULL)
9027  {
9028  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
9029  }
9030  return VK_SUCCESS;
9031  }
9032  allocator->FreeMemory(*pAllocation);
9033  *pAllocation = VK_NULL_HANDLE;
9034  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
9035  *pImage = VK_NULL_HANDLE;
9036  return res;
9037  }
9038  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
9039  *pImage = VK_NULL_HANDLE;
9040  return res;
9041  }
9042  return res;
9043 }
9044 
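// A matching sketch for vmaCreateImage() above (helper name, format and
// extent are illustrative). OPTIMAL tiling selects
// VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL, which keeps linear and optimal
// resources apart within memory blocks.
static VkResult vmaExampleCreateTexture(
    VmaAllocator allocator,
    VkImage* pImage,
    VmaAllocation* pAllocation)
{
    VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
    imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
    imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
    imgCreateInfo.extent = { 1024, 1024, 1 };
    imgCreateInfo.mipLevels = 1;
    imgCreateInfo.arrayLayers = 1;
    imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
    imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
    imgCreateInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    return vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo,
        pImage, pAllocation, VMA_NULL);
}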
9045 void vmaDestroyImage(
9046  VmaAllocator allocator,
9047  VkImage image,
9048  VmaAllocation allocation)
9049 {
9050  if(image != VK_NULL_HANDLE)
9051  {
9052  VMA_ASSERT(allocator);
9053 
9054  VMA_DEBUG_LOG("vmaDestroyImage");
9055 
9056  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9057 
9058  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
9059 
9060  allocator->FreeMemory(allocation);
9061  }
9062 }
9063 
9064 #endif // #ifdef VMA_IMPLEMENTATION
[Remainder of diff: regenerated Doxygen tooltip entries for docs/html/vk__mem__alloc_8h_source.html; each "Definition: vk_mem_alloc.h:N" reference shifts to N+1 to match the one-line change in src/vk_mem_alloc.h.]
Total number of bytes occupied by unused ranges.
Definition: vk_mem_alloc.h:1155
-
PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR
Definition: vk_mem_alloc.h:1019
-
Statistics returned by function vmaDefragment().
Definition: vk_mem_alloc.h:1767
+
VkDeviceSize allocationSizeMax
Definition: vk_mem_alloc.h:1157
+
PFN_vkBindImageMemory vkBindImageMemory
Definition: vk_mem_alloc.h:1012
+
VkDeviceSize unusedBytes
Total number of bytes occupied by unused ranges.
Definition: vk_mem_alloc.h:1156
+
PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR
Definition: vk_mem_alloc.h:1020
+
Statistics returned by function vmaDefragment().
Definition: vk_mem_alloc.h:1768
void vmaFreeMemory(VmaAllocator allocator, VmaAllocation allocation)
Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
-
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:1055
-
VmaStatInfo total
Definition: vk_mem_alloc.h:1165
-
uint32_t deviceMemoryBlocksFreed
Number of empty VkDeviceMemory objects that have been released to the system.
Definition: vk_mem_alloc.h:1775
-
VmaAllocationCreateFlags flags
Use VmaAllocationCreateFlagBits enum.
Definition: vk_mem_alloc.h:1317
-
VkDeviceSize maxBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places...
Definition: vk_mem_alloc.h:1758
-
PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
Definition: vk_mem_alloc.h:1012
-
void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called after successful vkAllocateMemory.
Definition: vk_mem_alloc.h:939
-
VkDevice device
Vulkan device.
Definition: vk_mem_alloc.h:1032
-
Describes parameter of created VmaPool.
Definition: vk_mem_alloc.h:1442
-
Definition: vk_mem_alloc.h:1436
-
VkDeviceSize size
Size of this allocation, in bytes.
Definition: vk_mem_alloc.h:1574
+
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:1056
+
VmaStatInfo total
Definition: vk_mem_alloc.h:1166
+
uint32_t deviceMemoryBlocksFreed
Number of empty VkDeviceMemory objects that have been released to the system.
Definition: vk_mem_alloc.h:1776
+
VmaAllocationCreateFlags flags
Use VmaAllocationCreateFlagBits enum.
Definition: vk_mem_alloc.h:1318
+
VkDeviceSize maxBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places...
Definition: vk_mem_alloc.h:1759
+
PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
Definition: vk_mem_alloc.h:1013
+
void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called after successful vkAllocateMemory.
Definition: vk_mem_alloc.h:940
+
VkDevice device
Vulkan device.
Definition: vk_mem_alloc.h:1033
+
Describes parameter of created VmaPool.
Definition: vk_mem_alloc.h:1443
+
Definition: vk_mem_alloc.h:1437
+
VkDeviceSize size
Size of this allocation, in bytes.
Definition: vk_mem_alloc.h:1575
void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
Given Memory Type Index, returns Property Flags of this memory type.
-
PFN_vkUnmapMemory vkUnmapMemory
Definition: vk_mem_alloc.h:1009
-
void * pUserData
Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
Definition: vk_mem_alloc.h:1354
-
size_t minBlockCount
Minimum number of blocks to be always allocated in this pool, even if they stay empty.
Definition: vk_mem_alloc.h:1458
-
size_t allocationCount
Number of VmaAllocation objects created from this pool that were not destroyed or lost...
Definition: vk_mem_alloc.h:1494
+
PFN_vkUnmapMemory vkUnmapMemory
Definition: vk_mem_alloc.h:1010
+
void * pUserData
Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
Definition: vk_mem_alloc.h:1355
+
size_t minBlockCount
Minimum number of blocks to be always allocated in this pool, even if they stay empty.
Definition: vk_mem_alloc.h:1459
+
size_t allocationCount
Number of VmaAllocation objects created from this pool that were not destroyed or lost...
Definition: vk_mem_alloc.h:1495
struct VmaVulkanFunctions VmaVulkanFunctions
Pointers to some Vulkan functions - a subset used by the library.
-
Definition: vk_mem_alloc.h:995
-
uint32_t memoryTypeIndex
Vulkan memory type index to allocate this pool from.
Definition: vk_mem_alloc.h:1445
+
Definition: vk_mem_alloc.h:996
+
uint32_t memoryTypeIndex
Vulkan memory type index to allocate this pool from.
Definition: vk_mem_alloc.h:1446
VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
-
VmaMemoryUsage
Definition: vk_mem_alloc.h:1193
+
VmaMemoryUsage
Definition: vk_mem_alloc.h:1194
struct VmaAllocationInfo VmaAllocationInfo
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
-
Optional configuration parameters to be passed to function vmaDefragment().
Definition: vk_mem_alloc.h:1753
+
Optional configuration parameters to be passed to function vmaDefragment().
Definition: vk_mem_alloc.h:1754
struct VmaPoolCreateInfo VmaPoolCreateInfo
Describes parameter of created VmaPool.
void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
Destroys VmaPool object and frees Vulkan device memory.
-
VkDeviceSize bytesFreed
Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
Definition: vk_mem_alloc.h:1771
-
Definition: vk_mem_alloc.h:1232
-
uint32_t memoryTypeBits
Bitmask containing one bit set for every memory type acceptable for this allocation.
Definition: vk_mem_alloc.h:1341
-
PFN_vkBindBufferMemory vkBindBufferMemory
Definition: vk_mem_alloc.h:1010
+
VkDeviceSize bytesFreed
Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
Definition: vk_mem_alloc.h:1772
+
Definition: vk_mem_alloc.h:1233
+
uint32_t memoryTypeBits
Bitmask containing one bit set for every memory type acceptable for this allocation.
Definition: vk_mem_alloc.h:1342
+
PFN_vkBindBufferMemory vkBindBufferMemory
Definition: vk_mem_alloc.h:1011
void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
Retrieves statistics of existing VmaPool object.
struct VmaDefragmentationInfo VmaDefragmentationInfo
Optional configuration parameters to be passed to function vmaDefragment().
-
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:1161
-
void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called before vkFreeMemory.
Definition: vk_mem_alloc.h:945
+
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:1162
+
void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called before vkFreeMemory.
Definition: vk_mem_alloc.h:946
void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
Sets pUserData in given allocation to new value.
VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
Allocates Vulkan device memory and creates VmaPool object.
-
VmaAllocatorCreateFlagBits
Flags for created VmaAllocator.
Definition: vk_mem_alloc.h:966
+
VmaAllocatorCreateFlagBits
Flags for created VmaAllocator.
Definition: vk_mem_alloc.h:967
struct VmaStatInfo VmaStatInfo
Calculated statistics of memory usage in entire allocator.
-
Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
Definition: vk_mem_alloc.h:971
-
uint32_t allocationsMoved
Number of allocations that have been moved to different places.
Definition: vk_mem_alloc.h:1773
+
Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
Definition: vk_mem_alloc.h:972
+
uint32_t allocationsMoved
Number of allocations that have been moved to different places.
Definition: vk_mem_alloc.h:1774
void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
Creates new allocation that is in lost state from the beginning.
-
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:1328
-
VkDeviceSize unusedRangeSizeMax
Size of the largest continuous free memory region.
Definition: vk_mem_alloc.h:1504
+
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:1329
+
VkDeviceSize unusedRangeSizeMax
Size of the largest continuous free memory region.
Definition: vk_mem_alloc.h:1505
void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
Builds and returns statistics as string in JSON format.
-
PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
Definition: vk_mem_alloc.h:1005
-
Calculated statistics of memory usage in entire allocator.
Definition: vk_mem_alloc.h:1144
-
VkDeviceSize blockSize
Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes.
Definition: vk_mem_alloc.h:1453
-
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Definition: vk_mem_alloc.h:958
+
PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
Definition: vk_mem_alloc.h:1006
+
Calculated statistics of memory usage in entire allocator.
Definition: vk_mem_alloc.h:1145
+
VkDeviceSize blockSize
Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes.
Definition: vk_mem_alloc.h:1454
+
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Definition: vk_mem_alloc.h:959
VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
-
Definition: vk_mem_alloc.h:1302
-
VkDeviceSize unusedRangeSizeMin
Definition: vk_mem_alloc.h:1157
-
PFN_vmaFreeDeviceMemoryFunction pfnFree
Optional, can be null.
Definition: vk_mem_alloc.h:962
-
VmaPoolCreateFlags flags
Use combination of VmaPoolCreateFlagBits.
Definition: vk_mem_alloc.h:1448
-
Definition: vk_mem_alloc.h:1241
+
Definition: vk_mem_alloc.h:1303
+
VkDeviceSize unusedRangeSizeMin
Definition: vk_mem_alloc.h:1158
+
PFN_vmaFreeDeviceMemoryFunction pfnFree
Optional, can be null.
Definition: vk_mem_alloc.h:963
+
VmaPoolCreateFlags flags
Use combination of VmaPoolCreateFlagBits.
Definition: vk_mem_alloc.h:1449
+
Definition: vk_mem_alloc.h:1242
struct VmaPoolStats VmaPoolStats
Describes parameter of existing VmaPool.
VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaCreateBuffer().
-
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:1323
-
Definition: vk_mem_alloc.h:1314
+
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:1324
+
Definition: vk_mem_alloc.h:1315
VkResult vmaFindMemoryTypeIndexForImageInfo(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
-
uint32_t blockCount
Number of VkDeviceMemory Vulkan memory blocks allocated.
Definition: vk_mem_alloc.h:1147
-
PFN_vkFreeMemory vkFreeMemory
Definition: vk_mem_alloc.h:1007
-
size_t maxBlockCount
Maximum number of blocks that can be allocated in this pool. Optional.
Definition: vk_mem_alloc.h:1466
-
const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
Definition: vk_mem_alloc.h:1041
-
size_t unusedRangeCount
Number of continuous memory ranges in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:1497
-
VkFlags VmaAllocationCreateFlags
Definition: vk_mem_alloc.h:1312
-
VmaPool pool
Pool that this allocation should be created in.
Definition: vk_mem_alloc.h:1347
+
uint32_t blockCount
Number of VkDeviceMemory Vulkan memory blocks allocated.
Definition: vk_mem_alloc.h:1148
+
PFN_vkFreeMemory vkFreeMemory
Definition: vk_mem_alloc.h:1008
+
size_t maxBlockCount
Maximum number of blocks that can be allocated in this pool. Optional.
Definition: vk_mem_alloc.h:1467
+
const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
Definition: vk_mem_alloc.h:1042
+
size_t unusedRangeCount
Number of continuous memory ranges in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:1498
+
VkFlags VmaAllocationCreateFlags
Definition: vk_mem_alloc.h:1313
+
VmaPool pool
Pool that this allocation should be created in.
Definition: vk_mem_alloc.h:1348
void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
-
const VkDeviceSize * pHeapSizeLimit
Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
Definition: vk_mem_alloc.h:1079
-
VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
Definition: vk_mem_alloc.h:1163
-
Set this flag to use a memory that will be persistently mapped and retrieve pointer to it...
Definition: vk_mem_alloc.h:1282
-
VkDeviceSize allocationSizeMin
Definition: vk_mem_alloc.h:1156
+
const VkDeviceSize * pHeapSizeLimit
Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
Definition: vk_mem_alloc.h:1080
+
VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
Definition: vk_mem_alloc.h:1164
+
Set this flag to use a memory that will be persistently mapped and retrieve pointer to it...
Definition: vk_mem_alloc.h:1283
+
VkDeviceSize allocationSizeMin
Definition: vk_mem_alloc.h:1157
VkResult vmaFindMemoryTypeIndexForBufferInfo(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
-
PFN_vkCreateImage vkCreateImage
Definition: vk_mem_alloc.h:1016
-
PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
Optional, can be null.
Definition: vk_mem_alloc.h:960
-
PFN_vkDestroyBuffer vkDestroyBuffer
Definition: vk_mem_alloc.h:1015
+
PFN_vkCreateImage vkCreateImage
Definition: vk_mem_alloc.h:1017
+
PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
Optional, can be null.
Definition: vk_mem_alloc.h:961
+
PFN_vkDestroyBuffer vkDestroyBuffer
Definition: vk_mem_alloc.h:1016
VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
Maps memory represented by given allocation and returns pointer to it.
-
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:1480
+
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:1481
VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaAllocateMemoryForBuffer().
struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
Description of a Allocator to be created.
-
void * pUserData
Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
Definition: vk_mem_alloc.h:1588
-
VkDeviceSize preferredLargeHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB...
Definition: vk_mem_alloc.h:1035
-
VkDeviceSize allocationSizeAvg
Definition: vk_mem_alloc.h:1156
-
VkDeviceSize usedBytes
Total number of bytes occupied by all allocations.
Definition: vk_mem_alloc.h:1153
+
void * pUserData
Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
Definition: vk_mem_alloc.h:1589
+
VkDeviceSize preferredLargeHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB...
Definition: vk_mem_alloc.h:1036
+
VkDeviceSize allocationSizeAvg
Definition: vk_mem_alloc.h:1157
+
VkDeviceSize usedBytes
Total number of bytes occupied by all allocations.
Definition: vk_mem_alloc.h:1154
struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
-
Describes parameter of existing VmaPool.
Definition: vk_mem_alloc.h:1485
-
VkDeviceSize offset
Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
Definition: vk_mem_alloc.h:1569
-
Definition: vk_mem_alloc.h:1310
-
VkDeviceSize bytesMoved
Total number of bytes that have been copied while moving allocations to different places...
Definition: vk_mem_alloc.h:1769
-
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:1003
+
Describes parameter of existing VmaPool.
Definition: vk_mem_alloc.h:1486
+
VkDeviceSize offset
Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
Definition: vk_mem_alloc.h:1570
+
Definition: vk_mem_alloc.h:1311
+
VkDeviceSize bytesMoved
Total number of bytes that have been copied while moving allocations to different places...
Definition: vk_mem_alloc.h:1770
+
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:1004
VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
Creates Allocator object.
-
PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR
Definition: vk_mem_alloc.h:1018
-
uint32_t unusedRangeCount
Number of free ranges of memory between allocations.
Definition: vk_mem_alloc.h:1151
-
Definition: vk_mem_alloc.h:1198
-
VkFlags VmaPoolCreateFlags
Definition: vk_mem_alloc.h:1438
+
PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR
Definition: vk_mem_alloc.h:1019
+
uint32_t unusedRangeCount
Number of free ranges of memory between allocations.
Definition: vk_mem_alloc.h:1152
+
Definition: vk_mem_alloc.h:1199
+
VkFlags VmaPoolCreateFlags
Definition: vk_mem_alloc.h:1439
void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
-
uint32_t allocationCount
Number of VmaAllocation allocation objects allocated.
Definition: vk_mem_alloc.h:1149
-
PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
Definition: vk_mem_alloc.h:1013
-
PFN_vkDestroyImage vkDestroyImage
Definition: vk_mem_alloc.h:1017
-
Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
Definition: vk_mem_alloc.h:1269
-
Definition: vk_mem_alloc.h:1225
-
void * pMappedData
Pointer to the beginning of this allocation as mapped data.
Definition: vk_mem_alloc.h:1583
+
uint32_t allocationCount
Number of VmaAllocation allocation objects allocated.
Definition: vk_mem_alloc.h:1150
+
PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
Definition: vk_mem_alloc.h:1014
+
PFN_vkDestroyImage vkDestroyImage
Definition: vk_mem_alloc.h:1018
+
Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
Definition: vk_mem_alloc.h:1270
+
Definition: vk_mem_alloc.h:1226
+
void * pMappedData
Pointer to the beginning of this allocation as mapped data.
Definition: vk_mem_alloc.h:1584
void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
Destroys Vulkan image and frees allocated memory.
-
Enables usage of VK_KHR_dedicated_allocation extension.
Definition: vk_mem_alloc.h:993
+
Enables usage of VK_KHR_dedicated_allocation extension.
Definition: vk_mem_alloc.h:994
struct VmaDefragmentationStats VmaDefragmentationStats
Statistics returned by function vmaDefragment().
-
PFN_vkAllocateMemory vkAllocateMemory
Definition: vk_mem_alloc.h:1006
-
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
Definition: vk_mem_alloc.h:1550
+
PFN_vkAllocateMemory vkAllocateMemory
Definition: vk_mem_alloc.h:1007
+
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
Definition: vk_mem_alloc.h:1551
VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation.
void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
Sets index of the current frame.
struct VmaAllocationCreateInfo VmaAllocationCreateInfo
VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
-
VmaPoolCreateFlagBits
Flags to be passed as VmaPoolCreateInfo::flags.
Definition: vk_mem_alloc.h:1416
-
VkDeviceSize unusedRangeSizeAvg
Definition: vk_mem_alloc.h:1157
+
VmaPoolCreateFlagBits
Flags to be passed as VmaPoolCreateInfo::flags.
Definition: vk_mem_alloc.h:1417
+
VkDeviceSize unusedRangeSizeAvg
Definition: vk_mem_alloc.h:1158
VkBool32 vmaTouchAllocation(VmaAllocator allocator, VmaAllocation allocation)
Returns VK_TRUE if allocation is not lost and atomically marks it as used in current frame...
- -
VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
Definition: vk_mem_alloc.h:1164
+ +
VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
Definition: vk_mem_alloc.h:1165
void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
Destroys Vulkan buffer and frees allocated memory.
-
VkDeviceSize unusedSize
Total number of bytes in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:1491
-
VkDeviceSize unusedRangeSizeMax
Definition: vk_mem_alloc.h:1157
-
uint32_t memoryType
Memory type index that this allocation was allocated from.
Definition: vk_mem_alloc.h:1555
+
VkDeviceSize unusedSize
Total number of bytes in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:1492
+
VkDeviceSize unusedRangeSizeMax
Definition: vk_mem_alloc.h:1158
+
uint32_t memoryType
Memory type index that this allocation was allocated from.
Definition: vk_mem_alloc.h:1556