From 5a51732c47dfd44bb495c8ea54f093516a06388d Mon Sep 17 00:00:00 2001 From: Adam Sawicki Date: Tue, 3 Apr 2018 12:28:11 +0200 Subject: [PATCH] Added link to my GDC talk. --- docs/html/usage_patterns.html | 3 +- docs/html/vk__mem__alloc_8h_source.html | 218 ++++++++++++------------ src/vk_mem_alloc.h | 4 + 3 files changed, 115 insertions(+), 110 deletions(-) diff --git a/docs/html/usage_patterns.html b/docs/html/usage_patterns.html index 9b9f459..314a272 100644 --- a/docs/html/usage_patterns.html +++ b/docs/html/usage_patterns.html @@ -66,7 +66,8 @@ $(function() {
Recommended usage patterns
-

+

See also slides from the talk: Sawicki, Adam. Advanced Graphics Techniques Tutorial: Memory management in Vulkan and DX12. Game Developers Conference, 2018.

+

Simple patterns

Render targets

diff --git a/docs/html/vk__mem__alloc_8h_source.html b/docs/html/vk__mem__alloc_8h_source.html index 60bb91c..f66dc35 100644 --- a/docs/html/vk__mem__alloc_8h_source.html +++ b/docs/html/vk__mem__alloc_8h_source.html @@ -62,162 +62,162 @@ $(function() {
vk_mem_alloc.h

-Go to the documentation of this file.
1 //
2 // Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29 
1074 #include <vulkan/vulkan.h>
1075 
1085 VK_DEFINE_HANDLE(VmaAllocator)
1086 
1087 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
1089  VmaAllocator allocator,
1090  uint32_t memoryType,
1091  VkDeviceMemory memory,
1092  VkDeviceSize size);
1094 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
1095  VmaAllocator allocator,
1096  uint32_t memoryType,
1097  VkDeviceMemory memory,
1098  VkDeviceSize size);
1099 
1113 
1143 
1146 typedef VkFlags VmaAllocatorCreateFlags;
1147 
1152 typedef struct VmaVulkanFunctions {
1153  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
1154  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
1155  PFN_vkAllocateMemory vkAllocateMemory;
1156  PFN_vkFreeMemory vkFreeMemory;
1157  PFN_vkMapMemory vkMapMemory;
1158  PFN_vkUnmapMemory vkUnmapMemory;
1159  PFN_vkBindBufferMemory vkBindBufferMemory;
1160  PFN_vkBindImageMemory vkBindImageMemory;
1161  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
1162  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
1163  PFN_vkCreateBuffer vkCreateBuffer;
1164  PFN_vkDestroyBuffer vkDestroyBuffer;
1165  PFN_vkCreateImage vkCreateImage;
1166  PFN_vkDestroyImage vkDestroyImage;
1167  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
1168  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
1169 } VmaVulkanFunctions;
1170 
1172 typedef struct VmaAllocatorCreateInfo
1173 {
1175  VmaAllocatorCreateFlags flags;
1177 
1178  VkPhysicalDevice physicalDevice;
1180 
1181  VkDevice device;
1183 
1186 
1187  const VkAllocationCallbacks* pAllocationCallbacks;
1189 
1228  const VkDeviceSize* pHeapSizeLimit;
1234  const VmaVulkanFunctions* pVulkanFunctions;
1241 } VmaAllocatorCreateInfo;
1242 
1244 VkResult vmaCreateAllocator(
1245  const VmaAllocatorCreateInfo* pCreateInfo,
1246  VmaAllocator* pAllocator);
1247 
1249 void vmaDestroyAllocator(
1250  VmaAllocator allocator);
1251 
1256 void vmaGetPhysicalDeviceProperties(
1257  VmaAllocator allocator,
1258  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
1259 
1264 void vmaGetMemoryProperties(
1265  VmaAllocator allocator,
1266  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
1267 
1274 void vmaGetMemoryTypeProperties(
1275  VmaAllocator allocator,
1276  uint32_t memoryTypeIndex,
1277  VkMemoryPropertyFlags* pFlags);
1278 
1287 void vmaSetCurrentFrameIndex(
1288  VmaAllocator allocator,
1289  uint32_t frameIndex);
1290 
1293 typedef struct VmaStatInfo
1294 {
1296  uint32_t blockCount;
1298  uint32_t allocationCount;
1300  uint32_t unusedRangeCount;
1302  VkDeviceSize usedBytes;
1304  VkDeviceSize unusedBytes;
1305  VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
1306  VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
1307 } VmaStatInfo;
1308 
1310 typedef struct VmaStats
1311 {
1312  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
1313  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
1314  VmaStatInfo total;
1315 } VmaStats;
1316 
1318 void vmaCalculateStats(
1319  VmaAllocator allocator,
1320  VmaStats* pStats);
1321 
1322 #define VMA_STATS_STRING_ENABLED 1
1323 
1324 #if VMA_STATS_STRING_ENABLED
1325 
1327 
1329 void vmaBuildStatsString(
1330  VmaAllocator allocator,
1331  char** ppStatsString,
1332  VkBool32 detailedMap);
1333 
1334 void vmaFreeStatsString(
1335  VmaAllocator allocator,
1336  char* pStatsString);
1337 
1338 #endif // #if VMA_STATS_STRING_ENABLED
1339 
1348 VK_DEFINE_HANDLE(VmaPool)
1349 
1350 typedef enum VmaMemoryUsage
1351 {
1400 } VmaMemoryUsage;
1401 
1416 
1466 
1470 
1471 typedef struct VmaAllocationCreateInfo
1472 {
1474  VmaAllocationCreateFlags flags;
1485  VkMemoryPropertyFlags requiredFlags;
1490  VkMemoryPropertyFlags preferredFlags;
1498  uint32_t memoryTypeBits;
1511  void* pUserData;
1512 } VmaAllocationCreateInfo;
1513 
1530 VkResult vmaFindMemoryTypeIndex(
1531  VmaAllocator allocator,
1532  uint32_t memoryTypeBits,
1533  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1534  uint32_t* pMemoryTypeIndex);
1535 
1548 VkResult vmaFindMemoryTypeIndexForBufferInfo(
1549  VmaAllocator allocator,
1550  const VkBufferCreateInfo* pBufferCreateInfo,
1551  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1552  uint32_t* pMemoryTypeIndex);
1553 
1566 VkResult vmaFindMemoryTypeIndexForImageInfo(
1567  VmaAllocator allocator,
1568  const VkImageCreateInfo* pImageCreateInfo,
1569  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1570  uint32_t* pMemoryTypeIndex);
1571 
1592 
1595 typedef VkFlags VmaPoolCreateFlags;
1596 
1599 typedef struct VmaPoolCreateInfo {
1605  VmaPoolCreateFlags flags;
1610  VkDeviceSize blockSize;
1638 } VmaPoolCreateInfo;
1639 
1642 typedef struct VmaPoolStats {
1645  VkDeviceSize size;
1648  VkDeviceSize unusedSize;
1661  VkDeviceSize unusedRangeSizeMax;
1662 } VmaPoolStats;
1663 
1670 VkResult vmaCreatePool(
1671  VmaAllocator allocator,
1672  const VmaPoolCreateInfo* pCreateInfo,
1673  VmaPool* pPool);
1674 
1677 void vmaDestroyPool(
1678  VmaAllocator allocator,
1679  VmaPool pool);
1680 
1687 void vmaGetPoolStats(
1688  VmaAllocator allocator,
1689  VmaPool pool,
1690  VmaPoolStats* pPoolStats);
1691 
1698 void vmaMakePoolAllocationsLost(
1699  VmaAllocator allocator,
1700  VmaPool pool,
1701  size_t* pLostAllocationCount);
1702 
1727 VK_DEFINE_HANDLE(VmaAllocation)
1728 
1729 
1731 typedef struct VmaAllocationInfo {
1736  uint32_t memoryType;
1745  VkDeviceMemory deviceMemory;
1750  VkDeviceSize offset;
1755  VkDeviceSize size;
1762  void* pMappedData;
1769  void* pUserData;
1770 } VmaAllocationInfo;
1771 
1782 VkResult vmaAllocateMemory(
1783  VmaAllocator allocator,
1784  const VkMemoryRequirements* pVkMemoryRequirements,
1785  const VmaAllocationCreateInfo* pCreateInfo,
1786  VmaAllocation* pAllocation,
1787  VmaAllocationInfo* pAllocationInfo);
1788 
1795 VkResult vmaAllocateMemoryForBuffer(
1796  VmaAllocator allocator,
1797  VkBuffer buffer,
1798  const VmaAllocationCreateInfo* pCreateInfo,
1799  VmaAllocation* pAllocation,
1800  VmaAllocationInfo* pAllocationInfo);
1801 
1803 VkResult vmaAllocateMemoryForImage(
1804  VmaAllocator allocator,
1805  VkImage image,
1806  const VmaAllocationCreateInfo* pCreateInfo,
1807  VmaAllocation* pAllocation,
1808  VmaAllocationInfo* pAllocationInfo);
1809 
1811 void vmaFreeMemory(
1812  VmaAllocator allocator,
1813  VmaAllocation allocation);
1814 
1831 void vmaGetAllocationInfo(
1832  VmaAllocator allocator,
1833  VmaAllocation allocation,
1834  VmaAllocationInfo* pAllocationInfo);
1835 
1850 VkBool32 vmaTouchAllocation(
1851  VmaAllocator allocator,
1852  VmaAllocation allocation);
1853 
1867 void vmaSetAllocationUserData(
1868  VmaAllocator allocator,
1869  VmaAllocation allocation,
1870  void* pUserData);
1871 
1882 void vmaCreateLostAllocation(
1883  VmaAllocator allocator,
1884  VmaAllocation* pAllocation);
1885 
1920 VkResult vmaMapMemory(
1921  VmaAllocator allocator,
1922  VmaAllocation allocation,
1923  void** ppData);
1924 
1929 void vmaUnmapMemory(
1930  VmaAllocator allocator,
1931  VmaAllocation allocation);
1932 
1934 typedef struct VmaDefragmentationInfo {
1939  VkDeviceSize maxBytesToMove;
1943  uint32_t maxAllocationsToMove;
1945 } VmaDefragmentationInfo;
1946 
1948 typedef struct VmaDefragmentationStats {
1950  VkDeviceSize bytesMoved;
1952  VkDeviceSize bytesFreed;
1954  uint32_t allocationsMoved;
1956  uint32_t deviceMemoryBlocksFreed;
1957 } VmaDefragmentationStats;
1958 
2041 VkResult vmaDefragment(
2042  VmaAllocator allocator,
2043  VmaAllocation* pAllocations,
2044  size_t allocationCount,
2045  VkBool32* pAllocationsChanged,
2046  const VmaDefragmentationInfo *pDefragmentationInfo,
2047  VmaDefragmentationStats* pDefragmentationStats);
2048 
2061 VkResult vmaBindBufferMemory(
2062  VmaAllocator allocator,
2063  VmaAllocation allocation,
2064  VkBuffer buffer);
2065 
2078 VkResult vmaBindImageMemory(
2079  VmaAllocator allocator,
2080  VmaAllocation allocation,
2081  VkImage image);
2082 
2109 VkResult vmaCreateBuffer(
2110  VmaAllocator allocator,
2111  const VkBufferCreateInfo* pBufferCreateInfo,
2112  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2113  VkBuffer* pBuffer,
2114  VmaAllocation* pAllocation,
2115  VmaAllocationInfo* pAllocationInfo);
2116 
2128 void vmaDestroyBuffer(
2129  VmaAllocator allocator,
2130  VkBuffer buffer,
2131  VmaAllocation allocation);
2132 
2134 VkResult vmaCreateImage(
2135  VmaAllocator allocator,
2136  const VkImageCreateInfo* pImageCreateInfo,
2137  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2138  VkImage* pImage,
2139  VmaAllocation* pAllocation,
2140  VmaAllocationInfo* pAllocationInfo);
2141 
2153 void vmaDestroyImage(
2154  VmaAllocator allocator,
2155  VkImage image,
2156  VmaAllocation allocation);
2157 
2158 #ifdef __cplusplus
2159 }
2160 #endif
2161 
2162 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
2163 
2164 // For Visual Studio IntelliSense.
2165 #ifdef __INTELLISENSE__
2166 #define VMA_IMPLEMENTATION
2167 #endif
2168 
2169 #ifdef VMA_IMPLEMENTATION
2170 #undef VMA_IMPLEMENTATION
2171 
2172 #include <cstdint>
2173 #include <cstdlib>
2174 #include <cstring>
2175 
2176 /*******************************************************************************
2177 CONFIGURATION SECTION
2178 
2179 Define some of these macros before each #include of this header or change them
2180 here if you need behavior other than the default, depending on your environment.
2181 */
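/*
For example (a minimal sketch, not part of this file): a .cpp file that compiles
the implementation with non-default configuration could look like this. All of
these macros are optional; any left undefined fall back to the defaults below.

 #define VMA_USE_STL_CONTAINERS 1
 #define VMA_DEBUG_MARGIN (16)
 #define VMA_IMPLEMENTATION
 #include "vk_mem_alloc.h"
*/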
2182 
2183 /*
2184 Define this macro to 1 to make the library fetch pointers to Vulkan functions
2185 internally, like:
2186 
2187  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
2188 
2189 Define to 0 if you are going to provide your own pointers to Vulkan functions via
2190 VmaAllocatorCreateInfo::pVulkanFunctions.
2191 */
2192 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
2193 #define VMA_STATIC_VULKAN_FUNCTIONS 1
2194 #endif
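/*
A sketch of the second option (illustrative only; how the pointers are obtained,
e.g. via vkGetDeviceProcAddr, is up to the caller, and the myLoadedVk* names
below are hypothetical):

 #define VMA_STATIC_VULKAN_FUNCTIONS 0
 // ...
 VmaVulkanFunctions vulkanFunctions = {};
 vulkanFunctions.vkGetPhysicalDeviceProperties = myLoadedVkGetPhysicalDeviceProperties;
 vulkanFunctions.vkAllocateMemory = myLoadedVkAllocateMemory;
 // ... fill the remaining members of VmaVulkanFunctions the same way ...

 VmaAllocatorCreateInfo allocatorInfo = {};
 allocatorInfo.physicalDevice = physicalDevice;
 allocatorInfo.device = device;
 allocatorInfo.pVulkanFunctions = &vulkanFunctions;
 VmaAllocator allocator;
 vmaCreateAllocator(&allocatorInfo, &allocator);
*/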
2195 
2196 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
2197 //#define VMA_USE_STL_CONTAINERS 1
2198 
2199 /* Set this macro to 1 to make the library include and use STL containers:
2200 std::pair, std::vector, std::list, std::unordered_map.
2201 
2202 Set it to 0 or leave it undefined to make the library use its own implementation of
2203 the containers.
2204 */
2205 #if VMA_USE_STL_CONTAINERS
2206  #define VMA_USE_STL_VECTOR 1
2207  #define VMA_USE_STL_UNORDERED_MAP 1
2208  #define VMA_USE_STL_LIST 1
2209 #endif
2210 
2211 #if VMA_USE_STL_VECTOR
2212  #include <vector>
2213 #endif
2214 
2215 #if VMA_USE_STL_UNORDERED_MAP
2216  #include <unordered_map>
2217 #endif
2218 
2219 #if VMA_USE_STL_LIST
2220  #include <list>
2221 #endif
2222 
2223 /*
2224 The following headers are used in this CONFIGURATION section only, so feel free to
2225 remove them if not needed.
2226 */
2227 #include <cassert> // for assert
2228 #include <algorithm> // for min, max
2229 #include <mutex> // for std::mutex
2230 #include <atomic> // for std::atomic
#include <cstdio> // for snprintf, used below when VMA_STATS_STRING_ENABLED is nonzero
2231 
2232 #if !defined(_WIN32) && !defined(__APPLE__)
2233  #include <malloc.h> // for aligned_alloc()
2234 #endif
2235 
2236 #ifndef VMA_NULL
2237  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
2238  #define VMA_NULL nullptr
2239 #endif
2240 
2241 #if defined(__APPLE__) || defined(__ANDROID__)
2242 #include <cstdlib>
2243 void *aligned_alloc(size_t alignment, size_t size)
2244 {
2245  // alignment must be >= sizeof(void*)
2246  if(alignment < sizeof(void*))
2247  {
2248  alignment = sizeof(void*);
2249  }
2250 
2251  void *pointer;
2252  if(posix_memalign(&pointer, alignment, size) == 0)
2253  return pointer;
2254  return VMA_NULL;
2255 }
2256 #endif
2257 
2258 // Normal assert to check for programmer errors, especially in the Debug configuration.
2259 #ifndef VMA_ASSERT
2260  #ifdef _DEBUG
2261  #define VMA_ASSERT(expr) assert(expr)
2262  #else
2263  #define VMA_ASSERT(expr)
2264  #endif
2265 #endif
2266 
2267 // An assert that is evaluated very often, e.g. inside data structures such as operator[].
2268 // Making it non-empty can make the program slow.
2269 #ifndef VMA_HEAVY_ASSERT
2270  #ifdef _DEBUG
2271  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
2272  #else
2273  #define VMA_HEAVY_ASSERT(expr)
2274  #endif
2275 #endif
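/*
For example, both asserts can be routed to a custom handler by defining the
macros before including this header (a sketch; MyAssertHandler is a
hypothetical function provided by the application):

 #define VMA_ASSERT(expr) do { if(!(expr)) MyAssertHandler(#expr, __FILE__, __LINE__); } while(false)
 #define VMA_HEAVY_ASSERT(expr) VMA_ASSERT(expr)
*/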
2276 
2277 #ifndef VMA_ALIGN_OF
2278  #define VMA_ALIGN_OF(type) (__alignof(type))
2279 #endif
2280 
2281 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
2282  #if defined(_WIN32)
2283  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
2284  #else
2285  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
2286  #endif
2287 #endif
2288 
2289 #ifndef VMA_SYSTEM_FREE
2290  #if defined(_WIN32)
2291  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
2292  #else
2293  #define VMA_SYSTEM_FREE(ptr) free(ptr)
2294  #endif
2295 #endif
2296 
2297 #ifndef VMA_MIN
2298  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
2299 #endif
2300 
2301 #ifndef VMA_MAX
2302  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
2303 #endif
2304 
2305 #ifndef VMA_SWAP
2306  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
2307 #endif
2308 
2309 #ifndef VMA_SORT
2310  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
2311 #endif
2312 
2313 #ifndef VMA_DEBUG_LOG
2314  #define VMA_DEBUG_LOG(format, ...)
2315  /*
2316  #define VMA_DEBUG_LOG(format, ...) do { \
2317  printf(format, __VA_ARGS__); \
2318  printf("\n"); \
2319  } while(false)
2320  */
2321 #endif
2322 
2323 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
2324 #if VMA_STATS_STRING_ENABLED
2325  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
2326  {
2327  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
2328  }
2329  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
2330  {
2331  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
2332  }
2333  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
2334  {
2335  snprintf(outStr, strLen, "%p", ptr);
2336  }
2337 #endif
2338 
2339 #ifndef VMA_MUTEX
2340  class VmaMutex
2341  {
2342  public:
2343  VmaMutex() { }
2344  ~VmaMutex() { }
2345  void Lock() { m_Mutex.lock(); }
2346  void Unlock() { m_Mutex.unlock(); }
2347  private:
2348  std::mutex m_Mutex;
2349  };
2350  #define VMA_MUTEX VmaMutex
2351 #endif
2352 
2353 /*
2354 If providing your own implementation, you need to implement a subset of std::atomic:
2355 
2356 - Constructor(uint32_t desired)
2357 - uint32_t load() const
2358 - void store(uint32_t desired)
2359 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
2360 */
2361 #ifndef VMA_ATOMIC_UINT32
2362  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
2363 #endif
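/*
A sketch of a custom replacement for single-threaded builds (an illustration,
not shipped with the library), implementing exactly the subset listed above:

 class MyDummyAtomicUint32
 {
 public:
  MyDummyAtomicUint32(uint32_t desired) : m_Value(desired) { }
  uint32_t load() const { return m_Value; }
  void store(uint32_t desired) { m_Value = desired; }
  bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
  {
   if(m_Value == expected) { m_Value = desired; return true; }
   expected = m_Value; // On failure, report the current value, like std::atomic.
   return false;
  }
 private:
  uint32_t m_Value;
 };
 #define VMA_ATOMIC_UINT32 MyDummyAtomicUint32
*/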
2364 
2365 #ifndef VMA_BEST_FIT
2366 
2378  #define VMA_BEST_FIT (1)
2379 #endif
2380 
2381 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
2382 
2386  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
2387 #endif
2388 
2389 #ifndef VMA_DEBUG_ALIGNMENT
2390 
2394  #define VMA_DEBUG_ALIGNMENT (1)
2395 #endif
2396 
2397 #ifndef VMA_DEBUG_MARGIN
2398 
2402  #define VMA_DEBUG_MARGIN (0)
2403 #endif
2404 
2405 #ifndef VMA_DEBUG_GLOBAL_MUTEX
2406 
2410  #define VMA_DEBUG_GLOBAL_MUTEX (0)
2411 #endif
2412 
2413 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
2414 
2418  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
2419 #endif
2420 
2421 #ifndef VMA_SMALL_HEAP_MAX_SIZE
2422  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
2424 #endif
2425 
2426 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
2427  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
2429 #endif
2430 
2431 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
2432 
2433 /*******************************************************************************
2434 END OF CONFIGURATION
2435 */
2436 
2437 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
2438  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
2439 
2440 // Returns number of bits set to 1 in (v).
2441 static inline uint32_t VmaCountBitsSet(uint32_t v)
2442 {
2443  uint32_t c = v - ((v >> 1) & 0x55555555);
2444  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
2445  c = ((c >> 4) + c) & 0x0F0F0F0F;
2446  c = ((c >> 8) + c) & 0x00FF00FF;
2447  c = ((c >> 16) + c) & 0x0000FFFF;
2448  return c;
2449 }
2450 
2451 // Aligns given value up to the nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
2452 // Use types like uint32_t, uint64_t as T.
2453 template <typename T>
2454 static inline T VmaAlignUp(T val, T align)
2455 {
2456  return (val + align - 1) / align * align;
2457 }
2458 
2459 // Division with mathematical rounding to the nearest integer.
2460 template <typename T>
2461 inline T VmaRoundDiv(T x, T y)
2462 {
2463  return (x + (y / (T)2)) / y;
2464 }
2465 
2466 #ifndef VMA_SORT
2467 
2468 template<typename Iterator, typename Compare>
2469 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
2470 {
2471  Iterator centerValue = end; --centerValue;
2472  Iterator insertIndex = beg;
2473  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
2474  {
2475  if(cmp(*memTypeIndex, *centerValue))
2476  {
2477  if(insertIndex != memTypeIndex)
2478  {
2479  VMA_SWAP(*memTypeIndex, *insertIndex);
2480  }
2481  ++insertIndex;
2482  }
2483  }
2484  if(insertIndex != centerValue)
2485  {
2486  VMA_SWAP(*insertIndex, *centerValue);
2487  }
2488  return insertIndex;
2489 }
2490 
2491 template<typename Iterator, typename Compare>
2492 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
2493 {
2494  if(beg < end)
2495  {
2496  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
2497  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
2498  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
2499  }
2500 }
2501 
2502 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
2503 
2504 #endif // #ifndef VMA_SORT
2505 
2506 /*
2507 Returns true if two memory blocks occupy overlapping pages.
2508 ResourceA must be at a lower memory offset than ResourceB.
2509 
2510 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
2511 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
2512 */
2513 static inline bool VmaBlocksOnSamePage(
2514  VkDeviceSize resourceAOffset,
2515  VkDeviceSize resourceASize,
2516  VkDeviceSize resourceBOffset,
2517  VkDeviceSize pageSize)
2518 {
2519  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
2520  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
2521  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
2522  VkDeviceSize resourceBStart = resourceBOffset;
2523  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
2524  return resourceAEndPage == resourceBStartPage;
2525 }
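// Worked example: with pageSize (bufferImageGranularity) = 4096, a resource at
// offset 0 with size 512 ends on page 0, and a resource at offset 1024 starts on
// page 0, so VmaBlocksOnSamePage(0, 512, 1024, 4096) returns true. For
// resourceBOffset = 8192 it would return false, because that resource starts on page 2.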
2526 
2527 enum VmaSuballocationType
2528 {
2529  VMA_SUBALLOCATION_TYPE_FREE = 0,
2530  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
2531  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
2532  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
2533  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
2534  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
2535  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
2536 };
2537 
2538 /*
2539 Returns true if given suballocation types could conflict and must respect
2540 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
2541 or linear image and the other one is optimal image. If type is unknown, behave
2542 conservatively.
2543 */
2544 static inline bool VmaIsBufferImageGranularityConflict(
2545  VmaSuballocationType suballocType1,
2546  VmaSuballocationType suballocType2)
2547 {
2548  if(suballocType1 > suballocType2)
2549  {
2550  VMA_SWAP(suballocType1, suballocType2);
2551  }
2552 
2553  switch(suballocType1)
2554  {
2555  case VMA_SUBALLOCATION_TYPE_FREE:
2556  return false;
2557  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
2558  return true;
2559  case VMA_SUBALLOCATION_TYPE_BUFFER:
2560  return
2561  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
2562  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
2563  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
2564  return
2565  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
2566  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
2567  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
2568  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
2569  return
2570  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
2571  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
2572  return false;
2573  default:
2574  VMA_ASSERT(0);
2575  return true;
2576  }
2577 }
2578 
2579 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
2580 struct VmaMutexLock
2581 {
2582 public:
2583  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
2584  m_pMutex(useMutex ? &mutex : VMA_NULL)
2585  {
2586  if(m_pMutex)
2587  {
2588  m_pMutex->Lock();
2589  }
2590  }
2591 
2592  ~VmaMutexLock()
2593  {
2594  if(m_pMutex)
2595  {
2596  m_pMutex->Unlock();
2597  }
2598  }
2599 
2600 private:
2601  VMA_MUTEX* m_pMutex;
2602 };
2603 
2604 #if VMA_DEBUG_GLOBAL_MUTEX
2605  static VMA_MUTEX gDebugGlobalMutex;
2606  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
2607 #else
2608  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
2609 #endif
2610 
2611 // Minimum size of a free suballocation to register it in the free suballocation collection.
2612 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
2613 
2614 /*
2615 Performs binary search and returns an iterator to the first element that is
2616 greater than or equal to (key), according to comparison (cmp).
2617 
2618 Cmp should return true if its first argument is less than its second argument.
2619 
2620 The returned value is the found element, if present in the collection, or the
2621 place where a new element with value (key) should be inserted.
2622 */
2623 template <typename IterT, typename KeyT, typename CmpT>
2624 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpT cmp)
2625 {
2626  size_t down = 0, up = (end - beg);
2627  while(down < up)
2628  {
2629  const size_t mid = (down + up) / 2;
2630  if(cmp(*(beg+mid), key))
2631  {
2632  down = mid + 1;
2633  }
2634  else
2635  {
2636  up = mid;
2637  }
2638  }
2639  return beg + down;
2640 }
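/*
Usage sketch (illustrative only): finding the insertion point in a sorted array
of VkDeviceSize values:

 VkDeviceSize sizes[] = { 16, 64, 256, 1024 };
 VkDeviceSize* it = VmaBinaryFindFirstNotLess(
  sizes, sizes + 4, (VkDeviceSize)100,
  [](VkDeviceSize lhs, VkDeviceSize rhs) { return lhs < rhs; });
 // it now points to 256 - the first element not less than 100.
*/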
2641 
2643 // Memory allocation
2644 
2645 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
2646 {
2647  if((pAllocationCallbacks != VMA_NULL) &&
2648  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
2649  {
2650  return (*pAllocationCallbacks->pfnAllocation)(
2651  pAllocationCallbacks->pUserData,
2652  size,
2653  alignment,
2654  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2655  }
2656  else
2657  {
2658  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
2659  }
2660 }
2661 
2662 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
2663 {
2664  if((pAllocationCallbacks != VMA_NULL) &&
2665  (pAllocationCallbacks->pfnFree != VMA_NULL))
2666  {
2667  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
2668  }
2669  else
2670  {
2671  VMA_SYSTEM_FREE(ptr);
2672  }
2673 }
2674 
2675 template<typename T>
2676 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
2677 {
2678  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
2679 }
2680 
2681 template<typename T>
2682 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
2683 {
2684  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
2685 }
2686 
2687 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
2688 
2689 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
2690 
2691 template<typename T>
2692 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
2693 {
2694  ptr->~T();
2695  VmaFree(pAllocationCallbacks, ptr);
2696 }
2697 
2698 template<typename T>
2699 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
2700 {
2701  if(ptr != VMA_NULL)
2702  {
2703  for(size_t i = count; i--; )
2704  {
2705  ptr[i].~T();
2706  }
2707  VmaFree(pAllocationCallbacks, ptr);
2708  }
2709 }
2710 
2711 // STL-compatible allocator.
2712 template<typename T>
2713 class VmaStlAllocator
2714 {
2715 public:
2716  const VkAllocationCallbacks* const m_pCallbacks;
2717  typedef T value_type;
2718 
2719  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
2720  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
2721 
2722  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
2723  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
2724 
2725  template<typename U>
2726  bool operator==(const VmaStlAllocator<U>& rhs) const
2727  {
2728  return m_pCallbacks == rhs.m_pCallbacks;
2729  }
2730  template<typename U>
2731  bool operator!=(const VmaStlAllocator<U>& rhs) const
2732  {
2733  return m_pCallbacks != rhs.m_pCallbacks;
2734  }
2735 
2736  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
2737 };
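/*
Usage sketch (illustrative; pCallbacks is a hypothetical
const VkAllocationCallbacks* that may also be null): this allocator routes
STL-style allocations through VmaMalloc/VmaFree, i.e. through the user-provided
callbacks or the system aligned allocator.

 VmaStlAllocator<int> alloc(pCallbacks);
 int* p = alloc.allocate(16); // raw storage for 16 ints, not constructed
 // ... use p[0..15] as POD storage ...
 alloc.deallocate(p, 16);
*/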
2738 
2739 #if VMA_USE_STL_VECTOR
2740 
2741 #define VmaVector std::vector
2742 
2743 template<typename T, typename allocatorT>
2744 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
2745 {
2746  vec.insert(vec.begin() + index, item);
2747 }
2748 
2749 template<typename T, typename allocatorT>
2750 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
2751 {
2752  vec.erase(vec.begin() + index);
2753 }
2754 
2755 #else // #if VMA_USE_STL_VECTOR
2756 
2757 /* Class with an interface compatible with a subset of std::vector.
2758 T must be POD because constructors and destructors are not called and memcpy is
2759 used for these objects. */
2760 template<typename T, typename AllocatorT>
2761 class VmaVector
2762 {
2763 public:
2764  typedef T value_type;
2765 
2766  VmaVector(const AllocatorT& allocator) :
2767  m_Allocator(allocator),
2768  m_pArray(VMA_NULL),
2769  m_Count(0),
2770  m_Capacity(0)
2771  {
2772  }
2773 
2774  VmaVector(size_t count, const AllocatorT& allocator) :
2775  m_Allocator(allocator),
2776  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
2777  m_Count(count),
2778  m_Capacity(count)
2779  {
2780  }
2781 
2782  VmaVector(const VmaVector<T, AllocatorT>& src) :
2783  m_Allocator(src.m_Allocator),
2784  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
2785  m_Count(src.m_Count),
2786  m_Capacity(src.m_Count)
2787  {
2788  if(m_Count != 0)
2789  {
2790  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
2791  }
2792  }
2793 
2794  ~VmaVector()
2795  {
2796  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
2797  }
2798 
2799  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
2800  {
2801  if(&rhs != this)
2802  {
2803  resize(rhs.m_Count);
2804  if(m_Count != 0)
2805  {
2806  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
2807  }
2808  }
2809  return *this;
2810  }
2811 
2812  bool empty() const { return m_Count == 0; }
2813  size_t size() const { return m_Count; }
2814  T* data() { return m_pArray; }
2815  const T* data() const { return m_pArray; }
2816 
2817  T& operator[](size_t index)
2818  {
2819  VMA_HEAVY_ASSERT(index < m_Count);
2820  return m_pArray[index];
2821  }
2822  const T& operator[](size_t index) const
2823  {
2824  VMA_HEAVY_ASSERT(index < m_Count);
2825  return m_pArray[index];
2826  }
2827 
2828  T& front()
2829  {
2830  VMA_HEAVY_ASSERT(m_Count > 0);
2831  return m_pArray[0];
2832  }
2833  const T& front() const
2834  {
2835  VMA_HEAVY_ASSERT(m_Count > 0);
2836  return m_pArray[0];
2837  }
2838  T& back()
2839  {
2840  VMA_HEAVY_ASSERT(m_Count > 0);
2841  return m_pArray[m_Count - 1];
2842  }
2843  const T& back() const
2844  {
2845  VMA_HEAVY_ASSERT(m_Count > 0);
2846  return m_pArray[m_Count - 1];
2847  }
2848 
2849  void reserve(size_t newCapacity, bool freeMemory = false)
2850  {
2851  newCapacity = VMA_MAX(newCapacity, m_Count);
2852 
2853  if((newCapacity < m_Capacity) && !freeMemory)
2854  {
2855  newCapacity = m_Capacity;
2856  }
2857 
2858  if(newCapacity != m_Capacity)
2859  {
2860  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
2861  if(m_Count != 0)
2862  {
2863  memcpy(newArray, m_pArray, m_Count * sizeof(T));
2864  }
2865  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
2866  m_Capacity = newCapacity;
2867  m_pArray = newArray;
2868  }
2869  }
2870 
2871  void resize(size_t newCount, bool freeMemory = false)
2872  {
2873  size_t newCapacity = m_Capacity;
2874  if(newCount > m_Capacity)
2875  {
2876  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
2877  }
2878  else if(freeMemory)
2879  {
2880  newCapacity = newCount;
2881  }
2882 
2883  if(newCapacity != m_Capacity)
2884  {
2885  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
2886  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
2887  if(elementsToCopy != 0)
2888  {
2889  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
2890  }
2891  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
2892  m_Capacity = newCapacity;
2893  m_pArray = newArray;
2894  }
2895 
2896  m_Count = newCount;
2897  }
2898 
2899  void clear(bool freeMemory = false)
2900  {
2901  resize(0, freeMemory);
2902  }
2903 
2904  void insert(size_t index, const T& src)
2905  {
2906  VMA_HEAVY_ASSERT(index <= m_Count);
2907  const size_t oldCount = size();
2908  resize(oldCount + 1);
2909  if(index < oldCount)
2910  {
2911  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
2912  }
2913  m_pArray[index] = src;
2914  }
2915 
2916  void remove(size_t index)
2917  {
2918  VMA_HEAVY_ASSERT(index < m_Count);
2919  const size_t oldCount = size();
2920  if(index < oldCount - 1)
2921  {
2922  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
2923  }
2924  resize(oldCount - 1);
2925  }
2926 
2927  void push_back(const T& src)
2928  {
2929  const size_t newIndex = size();
2930  resize(newIndex + 1);
2931  m_pArray[newIndex] = src;
2932  }
2933 
2934  void pop_back()
2935  {
2936  VMA_HEAVY_ASSERT(m_Count > 0);
2937  resize(size() - 1);
2938  }
2939 
2940  void push_front(const T& src)
2941  {
2942  insert(0, src);
2943  }
2944 
2945  void pop_front()
2946  {
2947  VMA_HEAVY_ASSERT(m_Count > 0);
2948  remove(0);
2949  }
2950 
2951  typedef T* iterator;
2952 
2953  iterator begin() { return m_pArray; }
2954  iterator end() { return m_pArray + m_Count; }
2955 
2956 private:
2957  AllocatorT m_Allocator;
2958  T* m_pArray;
2959  size_t m_Count;
2960  size_t m_Capacity;
2961 };
2962 
2963 template<typename T, typename allocatorT>
2964 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
2965 {
2966  vec.insert(index, item);
2967 }
2968 
2969 template<typename T, typename allocatorT>
2970 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
2971 {
2972  vec.remove(index);
2973 }
2974 
2975 #endif // #if VMA_USE_STL_VECTOR
2976 
2977 template<typename CmpLess, typename VectorT>
2978 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
2979 {
2980  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
2981  vector.data(),
2982  vector.data() + vector.size(),
2983  value,
2984  CmpLess()) - vector.data();
2985  VmaVectorInsert(vector, indexToInsert, value);
2986  return indexToInsert;
2987 }
2988 
2989 template<typename CmpLess, typename VectorT>
2990 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
2991 {
2992  CmpLess comparator;
2993  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
2994  vector.begin(),
2995  vector.end(),
2996  value,
2997  comparator);
2998  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
2999  {
3000  size_t indexToRemove = it - vector.begin();
3001  VmaVectorRemove(vector, indexToRemove);
3002  return true;
3003  }
3004  return false;
3005 }
3006 
3007 template<typename CmpLess, typename VectorT>
3008 size_t VmaVectorFindSorted(const VectorT& vector, const typename VectorT::value_type& value)
3009 {
3010  CmpLess comparator;
3011  const typename VectorT::value_type* it = VmaBinaryFindFirstNotLess(
3012  vector.data(),
3013  vector.data() + vector.size(),
3014  value,
3015  comparator);
3016  if((it != vector.data() + vector.size()) && !comparator(*it, value) && !comparator(value, *it))
3017  {
3018  return it - vector.data();
3019  }
3020  else
3021  {
3022  return vector.size();
3023  }
3024 }
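/*
Usage sketch for the sorted-vector helpers (illustrative; MyLess and pCallbacks
are hypothetical):

 struct MyLess { bool operator()(uint32_t a, uint32_t b) const { return a < b; } };
 VmaVector< uint32_t, VmaStlAllocator<uint32_t> > v(VmaStlAllocator<uint32_t>(pCallbacks));
 VmaVectorInsertSorted<MyLess>(v, 30u);
 VmaVectorInsertSorted<MyLess>(v, 10u); // v is now { 10, 30 }
 size_t index = VmaVectorFindSorted<MyLess>(v, 30u); // index == 1
 VmaVectorRemoveSorted<MyLess>(v, 10u); // returns true, v is now { 30 }
*/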
3025 
3027 // class VmaPoolAllocator
3028 
3029 /*
3030 Allocator for objects of type T using a list of arrays (pools) to speed up
3031 allocation. The number of elements that can be allocated is not bounded, because
3032 the allocator can create multiple blocks.
3033 */
3034 template<typename T>
3035 class VmaPoolAllocator
3036 {
3037 public:
3038  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
3039  ~VmaPoolAllocator();
3040  void Clear();
3041  T* Alloc();
3042  void Free(T* ptr);
3043 
3044 private:
3045  union Item
3046  {
3047  uint32_t NextFreeIndex;
3048  T Value;
3049  };
3050 
3051  struct ItemBlock
3052  {
3053  Item* pItems;
3054  uint32_t FirstFreeIndex;
3055  };
3056 
3057  const VkAllocationCallbacks* m_pAllocationCallbacks;
3058  size_t m_ItemsPerBlock;
3059  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
3060 
3061  ItemBlock& CreateNewBlock();
3062 };
3063 
3064 template<typename T>
3065 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
3066  m_pAllocationCallbacks(pAllocationCallbacks),
3067  m_ItemsPerBlock(itemsPerBlock),
3068  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
3069 {
3070  VMA_ASSERT(itemsPerBlock > 0);
3071 }
3072 
3073 template<typename T>
3074 VmaPoolAllocator<T>::~VmaPoolAllocator()
3075 {
3076  Clear();
3077 }
3078 
3079 template<typename T>
3080 void VmaPoolAllocator<T>::Clear()
3081 {
3082  for(size_t i = m_ItemBlocks.size(); i--; )
3083  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
3084  m_ItemBlocks.clear();
3085 }
3086 
3087 template<typename T>
3088 T* VmaPoolAllocator<T>::Alloc()
3089 {
3090  for(size_t i = m_ItemBlocks.size(); i--; )
3091  {
3092  ItemBlock& block = m_ItemBlocks[i];
3093  // This block has some free items: Use first one.
3094  if(block.FirstFreeIndex != UINT32_MAX)
3095  {
3096  Item* const pItem = &block.pItems[block.FirstFreeIndex];
3097  block.FirstFreeIndex = pItem->NextFreeIndex;
3098  return &pItem->Value;
3099  }
3100  }
3101 
3102  // No block has free item: Create new one and use it.
3103  ItemBlock& newBlock = CreateNewBlock();
3104  Item* const pItem = &newBlock.pItems[0];
3105  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
3106  return &pItem->Value;
3107 }
3108 
3109 template<typename T>
3110 void VmaPoolAllocator<T>::Free(T* ptr)
3111 {
3112  // Search all memory blocks to find ptr.
3113  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
3114  {
3115  ItemBlock& block = m_ItemBlocks[i];
3116 
3117  // Casting to union.
3118  Item* pItemPtr;
3119  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
3120 
3121  // Check if pItemPtr is in address range of this block.
3122  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
3123  {
3124  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
3125  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
3126  block.FirstFreeIndex = index;
3127  return;
3128  }
3129  }
3130  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
3131 }
3132 
3133 template<typename T>
3134 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
3135 {
3136  ItemBlock newBlock = {
3137  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
3138 
3139  m_ItemBlocks.push_back(newBlock);
3140 
3141  // Set up a singly-linked list of all free items in this block.
3142  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
3143  newBlock.pItems[i].NextFreeIndex = i + 1;
3144  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
3145  return m_ItemBlocks.back();
3146 }
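/*
Usage sketch (illustrative; MyItem and pCallbacks are hypothetical): Alloc() is
O(1) whenever some block has a free item, and freed items are recycled through
the per-block free list. Note that Alloc() does not run constructors, so T
should be POD-like, matching how the union Item stores it.

 struct MyItem { uint32_t data; };
 VmaPoolAllocator<MyItem> itemAllocator(pCallbacks, 128);
 MyItem* item = itemAllocator.Alloc();
 // ... use *item ...
 itemAllocator.Free(item);
*/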
3147 
3149 // class VmaRawList, VmaList
3150 
3151 #if VMA_USE_STL_LIST
3152 
3153 #define VmaList std::list
3154 
3155 #else // #if VMA_USE_STL_LIST
3156 
3157 template<typename T>
3158 struct VmaListItem
3159 {
3160  VmaListItem* pPrev;
3161  VmaListItem* pNext;
3162  T Value;
3163 };
3164 
3165 // Doubly linked list.
3166 template<typename T>
3167 class VmaRawList
3168 {
3169 public:
3170  typedef VmaListItem<T> ItemType;
3171 
3172  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
3173  ~VmaRawList();
3174  void Clear();
3175 
3176  size_t GetCount() const { return m_Count; }
3177  bool IsEmpty() const { return m_Count == 0; }
3178 
3179  ItemType* Front() { return m_pFront; }
3180  const ItemType* Front() const { return m_pFront; }
3181  ItemType* Back() { return m_pBack; }
3182  const ItemType* Back() const { return m_pBack; }
3183 
3184  ItemType* PushBack();
3185  ItemType* PushFront();
3186  ItemType* PushBack(const T& value);
3187  ItemType* PushFront(const T& value);
3188  void PopBack();
3189  void PopFront();
3190 
3191  // Item can be null - it means PushBack.
3192  ItemType* InsertBefore(ItemType* pItem);
3193  // Item can be null - it means PushFront.
3194  ItemType* InsertAfter(ItemType* pItem);
3195 
3196  ItemType* InsertBefore(ItemType* pItem, const T& value);
3197  ItemType* InsertAfter(ItemType* pItem, const T& value);
3198 
3199  void Remove(ItemType* pItem);
3200 
3201 private:
3202  const VkAllocationCallbacks* const m_pAllocationCallbacks;
3203  VmaPoolAllocator<ItemType> m_ItemAllocator;
3204  ItemType* m_pFront;
3205  ItemType* m_pBack;
3206  size_t m_Count;
3207 
3208  // Declared but not defined, to block the copy constructor and assignment operator.
3209  VmaRawList(const VmaRawList<T>& src);
3210  VmaRawList<T>& operator=(const VmaRawList<T>& rhs);
3211 };
3212 
3213 template<typename T>
3214 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
3215  m_pAllocationCallbacks(pAllocationCallbacks),
3216  m_ItemAllocator(pAllocationCallbacks, 128),
3217  m_pFront(VMA_NULL),
3218  m_pBack(VMA_NULL),
3219  m_Count(0)
3220 {
3221 }
3222 
3223 template<typename T>
3224 VmaRawList<T>::~VmaRawList()
3225 {
3226  // Intentionally not calling Clear, because that would waste computation
3227  // returning all items to m_ItemAllocator as free.
3228 }
3229 
3230 template<typename T>
3231 void VmaRawList<T>::Clear()
3232 {
3233  if(IsEmpty() == false)
3234  {
3235  ItemType* pItem = m_pBack;
3236  while(pItem != VMA_NULL)
3237  {
3238  ItemType* const pPrevItem = pItem->pPrev;
3239  m_ItemAllocator.Free(pItem);
3240  pItem = pPrevItem;
3241  }
3242  m_pFront = VMA_NULL;
3243  m_pBack = VMA_NULL;
3244  m_Count = 0;
3245  }
3246 }
3247 
3248 template<typename T>
3249 VmaListItem<T>* VmaRawList<T>::PushBack()
3250 {
3251  ItemType* const pNewItem = m_ItemAllocator.Alloc();
3252  pNewItem->pNext = VMA_NULL;
3253  if(IsEmpty())
3254  {
3255  pNewItem->pPrev = VMA_NULL;
3256  m_pFront = pNewItem;
3257  m_pBack = pNewItem;
3258  m_Count = 1;
3259  }
3260  else
3261  {
3262  pNewItem->pPrev = m_pBack;
3263  m_pBack->pNext = pNewItem;
3264  m_pBack = pNewItem;
3265  ++m_Count;
3266  }
3267  return pNewItem;
3268 }
3269 
3270 template<typename T>
3271 VmaListItem<T>* VmaRawList<T>::PushFront()
3272 {
3273  ItemType* const pNewItem = m_ItemAllocator.Alloc();
3274  pNewItem->pPrev = VMA_NULL;
3275  if(IsEmpty())
3276  {
3277  pNewItem->pNext = VMA_NULL;
3278  m_pFront = pNewItem;
3279  m_pBack = pNewItem;
3280  m_Count = 1;
3281  }
3282  else
3283  {
3284  pNewItem->pNext = m_pFront;
3285  m_pFront->pPrev = pNewItem;
3286  m_pFront = pNewItem;
3287  ++m_Count;
3288  }
3289  return pNewItem;
3290 }
3291 
3292 template<typename T>
3293 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
3294 {
3295  ItemType* const pNewItem = PushBack();
3296  pNewItem->Value = value;
3297  return pNewItem;
3298 }
3299 
3300 template<typename T>
3301 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
3302 {
3303  ItemType* const pNewItem = PushFront();
3304  pNewItem->Value = value;
3305  return pNewItem;
3306 }
3307 
3308 template<typename T>
3309 void VmaRawList<T>::PopBack()
3310 {
3311  VMA_HEAVY_ASSERT(m_Count > 0);
3312  ItemType* const pBackItem = m_pBack;
3313  ItemType* const pPrevItem = pBackItem->pPrev;
3314  if(pPrevItem != VMA_NULL)
3315  {
3316  pPrevItem->pNext = VMA_NULL;
3317  }
3318  m_pBack = pPrevItem;
3319  m_ItemAllocator.Free(pBackItem);
3320  --m_Count;
3321 }
3322 
3323 template<typename T>
3324 void VmaRawList<T>::PopFront()
3325 {
3326  VMA_HEAVY_ASSERT(m_Count > 0);
3327  ItemType* const pFrontItem = m_pFront;
3328  ItemType* const pNextItem = pFrontItem->pNext;
3329  if(pNextItem != VMA_NULL)
3330  {
3331  pNextItem->pPrev = VMA_NULL;
3332  }
3333  m_pFront = pNextItem;
3334  m_ItemAllocator.Free(pFrontItem);
3335  --m_Count;
3336 }
3337 
3338 template<typename T>
3339 void VmaRawList<T>::Remove(ItemType* pItem)
3340 {
3341  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
3342  VMA_HEAVY_ASSERT(m_Count > 0);
3343 
3344  if(pItem->pPrev != VMA_NULL)
3345  {
3346  pItem->pPrev->pNext = pItem->pNext;
3347  }
3348  else
3349  {
3350  VMA_HEAVY_ASSERT(m_pFront == pItem);
3351  m_pFront = pItem->pNext;
3352  }
3353 
3354  if(pItem->pNext != VMA_NULL)
3355  {
3356  pItem->pNext->pPrev = pItem->pPrev;
3357  }
3358  else
3359  {
3360  VMA_HEAVY_ASSERT(m_pBack == pItem);
3361  m_pBack = pItem->pPrev;
3362  }
3363 
3364  m_ItemAllocator.Free(pItem);
3365  --m_Count;
3366 }
3367 
3368 template<typename T>
3369 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
3370 {
3371  if(pItem != VMA_NULL)
3372  {
3373  ItemType* const prevItem = pItem->pPrev;
3374  ItemType* const newItem = m_ItemAllocator.Alloc();
3375  newItem->pPrev = prevItem;
3376  newItem->pNext = pItem;
3377  pItem->pPrev = newItem;
3378  if(prevItem != VMA_NULL)
3379  {
3380  prevItem->pNext = newItem;
3381  }
3382  else
3383  {
3384  VMA_HEAVY_ASSERT(m_pFront == pItem);
3385  m_pFront = newItem;
3386  }
3387  ++m_Count;
3388  return newItem;
3389  }
3390  else
3391  return PushBack();
3392 }
3393 
3394 template<typename T>
3395 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
3396 {
3397  if(pItem != VMA_NULL)
3398  {
3399  ItemType* const nextItem = pItem->pNext;
3400  ItemType* const newItem = m_ItemAllocator.Alloc();
3401  newItem->pNext = nextItem;
3402  newItem->pPrev = pItem;
3403  pItem->pNext = newItem;
3404  if(nextItem != VMA_NULL)
3405  {
3406  nextItem->pPrev = newItem;
3407  }
3408  else
3409  {
3410  VMA_HEAVY_ASSERT(m_pBack == pItem);
3411  m_pBack = newItem;
3412  }
3413  ++m_Count;
3414  return newItem;
3415  }
3416  else
3417  return PushFront();
3418 }
3419 
3420 template<typename T>
3421 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
3422 {
3423  ItemType* const newItem = InsertBefore(pItem);
3424  newItem->Value = value;
3425  return newItem;
3426 }
3427 
3428 template<typename T>
3429 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
3430 {
3431  ItemType* const newItem = InsertAfter(pItem);
3432  newItem->Value = value;
3433  return newItem;
3434 }
3435 
3436 template<typename T, typename AllocatorT>
3437 class VmaList
3438 {
3439 public:
3440  class iterator
3441  {
3442  public:
3443  iterator() :
3444  m_pList(VMA_NULL),
3445  m_pItem(VMA_NULL)
3446  {
3447  }
3448 
3449  T& operator*() const
3450  {
3451  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
3452  return m_pItem->Value;
3453  }
3454  T* operator->() const
3455  {
3456  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
3457  return &m_pItem->Value;
3458  }
3459 
3460  iterator& operator++()
3461  {
3462  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
3463  m_pItem = m_pItem->pNext;
3464  return *this;
3465  }
3466  iterator& operator--()
3467  {
3468  if(m_pItem != VMA_NULL)
3469  {
3470  m_pItem = m_pItem->pPrev;
3471  }
3472  else
3473  {
3474  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
3475  m_pItem = m_pList->Back();
3476  }
3477  return *this;
3478  }
3479 
3480  iterator operator++(int)
3481  {
3482  iterator result = *this;
3483  ++*this;
3484  return result;
3485  }
3486  iterator operator--(int)
3487  {
3488  iterator result = *this;
3489  --*this;
3490  return result;
3491  }
3492 
3493  bool operator==(const iterator& rhs) const
3494  {
3495  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
3496  return m_pItem == rhs.m_pItem;
3497  }
3498  bool operator!=(const iterator& rhs) const
3499  {
3500  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
3501  return m_pItem != rhs.m_pItem;
3502  }
3503 
3504  private:
3505  VmaRawList<T>* m_pList;
3506  VmaListItem<T>* m_pItem;
3507 
3508  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
3509  m_pList(pList),
3510  m_pItem(pItem)
3511  {
3512  }
3513 
3514  friend class VmaList<T, AllocatorT>;
3515  };
3516 
3517  class const_iterator
3518  {
3519  public:
3520  const_iterator() :
3521  m_pList(VMA_NULL),
3522  m_pItem(VMA_NULL)
3523  {
3524  }
3525 
3526  const_iterator(const iterator& src) :
3527  m_pList(src.m_pList),
3528  m_pItem(src.m_pItem)
3529  {
3530  }
3531 
3532  const T& operator*() const
3533  {
3534  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
3535  return m_pItem->Value;
3536  }
3537  const T* operator->() const
3538  {
3539  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
3540  return &m_pItem->Value;
3541  }
3542 
3543  const_iterator& operator++()
3544  {
3545  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
3546  m_pItem = m_pItem->pNext;
3547  return *this;
3548  }
3549  const_iterator& operator--()
3550  {
3551  if(m_pItem != VMA_NULL)
3552  {
3553  m_pItem = m_pItem->pPrev;
3554  }
3555  else
3556  {
3557  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
3558  m_pItem = m_pList->Back();
3559  }
3560  return *this;
3561  }
3562 
3563  const_iterator operator++(int)
3564  {
3565  const_iterator result = *this;
3566  ++*this;
3567  return result;
3568  }
3569  const_iterator operator--(int)
3570  {
3571  const_iterator result = *this;
3572  --*this;
3573  return result;
3574  }
3575 
3576  bool operator==(const const_iterator& rhs) const
3577  {
3578  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
3579  return m_pItem == rhs.m_pItem;
3580  }
3581  bool operator!=(const const_iterator& rhs) const
3582  {
3583  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
3584  return m_pItem != rhs.m_pItem;
3585  }
3586 
3587  private:
3588  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
3589  m_pList(pList),
3590  m_pItem(pItem)
3591  {
3592  }
3593 
3594  const VmaRawList<T>* m_pList;
3595  const VmaListItem<T>* m_pItem;
3596 
3597  friend class VmaList<T, AllocatorT>;
3598  };
3599 
3600  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
3601 
3602  bool empty() const { return m_RawList.IsEmpty(); }
3603  size_t size() const { return m_RawList.GetCount(); }
3604 
3605  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
3606  iterator end() { return iterator(&m_RawList, VMA_NULL); }
3607 
3608  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
3609  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
3610 
3611  void clear() { m_RawList.Clear(); }
3612  void push_back(const T& value) { m_RawList.PushBack(value); }
3613  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
3614  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
3615 
3616 private:
3617  VmaRawList<T> m_RawList;
3618 };
3619 
3620 #endif // #if VMA_USE_STL_LIST
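/*
Usage sketch (illustrative; pCallbacks is hypothetical): VmaList offers the same
iterator-based interface whichever implementation is selected above:

 VmaList< uint32_t, VmaStlAllocator<uint32_t> > list(VmaStlAllocator<uint32_t>(pCallbacks));
 list.push_back(42);
 for(VmaList< uint32_t, VmaStlAllocator<uint32_t> >::iterator it = list.begin(); it != list.end(); ++it)
 {
  // *it == 42
 }
*/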
3621 
3623 // class VmaMap
3624 
3625 // Unused in this version.
3626 #if 0
3627 
3628 #if VMA_USE_STL_UNORDERED_MAP
3629 
3630 #define VmaPair std::pair
3631 
3632 #define VMA_MAP_TYPE(KeyT, ValueT) \
3633  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
3634 
3635 #else // #if VMA_USE_STL_UNORDERED_MAP
3636 
3637 template<typename T1, typename T2>
3638 struct VmaPair
3639 {
3640  T1 first;
3641  T2 second;
3642 
3643  VmaPair() : first(), second() { }
3644  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
3645 };
3646 
3647 /* Class compatible with subset of interface of std::unordered_map.
3648 KeyT, ValueT must be POD because they will be stored in VmaVector.
3649 */
3650 template<typename KeyT, typename ValueT>
3651 class VmaMap
3652 {
3653 public:
3654  typedef VmaPair<KeyT, ValueT> PairType;
3655  typedef PairType* iterator;
3656 
3657  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
3658 
3659  iterator begin() { return m_Vector.begin(); }
3660  iterator end() { return m_Vector.end(); }
3661 
3662  void insert(const PairType& pair);
3663  iterator find(const KeyT& key);
3664  void erase(iterator it);
3665 
3666 private:
3667  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
3668 };
3669 
3670 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
3671 
3672 template<typename FirstT, typename SecondT>
3673 struct VmaPairFirstLess
3674 {
3675  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
3676  {
3677  return lhs.first < rhs.first;
3678  }
3679  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
3680  {
3681  return lhs.first < rhsFirst;
3682  }
3683 };
3684 
3685 template<typename KeyT, typename ValueT>
3686 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
3687 {
3688  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
3689  m_Vector.data(),
3690  m_Vector.data() + m_Vector.size(),
3691  pair,
3692  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
3693  VmaVectorInsert(m_Vector, indexToInsert, pair);
3694 }
3695 
3696 template<typename KeyT, typename ValueT>
3697 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
3698 {
3699  PairType* it = VmaBinaryFindFirstNotLess(
3700  m_Vector.data(),
3701  m_Vector.data() + m_Vector.size(),
3702  key,
3703  VmaPairFirstLess<KeyT, ValueT>());
3704  if((it != m_Vector.end()) && (it->first == key))
3705  {
3706  return it;
3707  }
3708  else
3709  {
3710  return m_Vector.end();
3711  }
3712 }
3713 
3714 template<typename KeyT, typename ValueT>
3715 void VmaMap<KeyT, ValueT>::erase(iterator it)
3716 {
3717  VmaVectorRemove(m_Vector, it - m_Vector.begin());
3718 }
3719 
3720 #endif // #if VMA_USE_STL_UNORDERED_MAP
3721 
3722 #endif // #if 0
3723 
3725 
3726 class VmaDeviceMemoryBlock;
3727 
3728 struct VmaAllocation_T
3729 {
3730 private:
3731  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
3732 
3733  enum FLAGS
3734  {
3735  FLAG_USER_DATA_STRING = 0x01,
3736  };
3737 
3738 public:
3739  enum ALLOCATION_TYPE
3740  {
3741  ALLOCATION_TYPE_NONE,
3742  ALLOCATION_TYPE_BLOCK,
3743  ALLOCATION_TYPE_DEDICATED,
3744  };
3745 
3746  VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
3747  m_Alignment(1),
3748  m_Size(0),
3749  m_pUserData(VMA_NULL),
3750  m_LastUseFrameIndex(currentFrameIndex),
3751  m_Type((uint8_t)ALLOCATION_TYPE_NONE),
3752  m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
3753  m_MapCount(0),
3754  m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
3755  {
3756  }
3757 
3758  ~VmaAllocation_T()
3759  {
3760  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
3761 
3762  // Check if owned string was freed.
3763  VMA_ASSERT(m_pUserData == VMA_NULL);
3764  }
3765 
3766  void InitBlockAllocation(
3767  VmaPool hPool,
3768  VmaDeviceMemoryBlock* block,
3769  VkDeviceSize offset,
3770  VkDeviceSize alignment,
3771  VkDeviceSize size,
3772  VmaSuballocationType suballocationType,
3773  bool mapped,
3774  bool canBecomeLost)
3775  {
3776  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
3777  VMA_ASSERT(block != VMA_NULL);
3778  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
3779  m_Alignment = alignment;
3780  m_Size = size;
3781  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
3782  m_SuballocationType = (uint8_t)suballocationType;
3783  m_BlockAllocation.m_hPool = hPool;
3784  m_BlockAllocation.m_Block = block;
3785  m_BlockAllocation.m_Offset = offset;
3786  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
3787  }
3788 
3789  void InitLost()
3790  {
3791  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
3792  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
3793  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
3794  m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
3795  m_BlockAllocation.m_Block = VMA_NULL;
3796  m_BlockAllocation.m_Offset = 0;
3797  m_BlockAllocation.m_CanBecomeLost = true;
3798  }
3799 
3800  void ChangeBlockAllocation(
3801  VmaAllocator hAllocator,
3802  VmaDeviceMemoryBlock* block,
3803  VkDeviceSize offset);
3804 
3805  // pMappedData not null means allocation is created with MAPPED flag.
3806  void InitDedicatedAllocation(
3807  uint32_t memoryTypeIndex,
3808  VkDeviceMemory hMemory,
3809  VmaSuballocationType suballocationType,
3810  void* pMappedData,
3811  VkDeviceSize size)
3812  {
3813  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
3814  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
3815  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
3816  m_Alignment = 0;
3817  m_Size = size;
3818  m_SuballocationType = (uint8_t)suballocationType;
3819  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
3820  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
3821  m_DedicatedAllocation.m_hMemory = hMemory;
3822  m_DedicatedAllocation.m_pMappedData = pMappedData;
3823  }
3824 
3825  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
3826  VkDeviceSize GetAlignment() const { return m_Alignment; }
3827  VkDeviceSize GetSize() const { return m_Size; }
3828  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
3829  void* GetUserData() const { return m_pUserData; }
3830  void SetUserData(VmaAllocator hAllocator, void* pUserData);
3831  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
3832 
3833  VmaDeviceMemoryBlock* GetBlock() const
3834  {
3835  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
3836  return m_BlockAllocation.m_Block;
3837  }
3838  VkDeviceSize GetOffset() const;
3839  VkDeviceMemory GetMemory() const;
3840  uint32_t GetMemoryTypeIndex() const;
3841  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
3842  void* GetMappedData() const;
3843  bool CanBecomeLost() const;
3844  VmaPool GetPool() const;
3845 
3846  uint32_t GetLastUseFrameIndex() const
3847  {
3848  return m_LastUseFrameIndex.load();
3849  }
3850  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
3851  {
3852  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
3853  }
3854  /*
3855  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
3856  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
3857  - Else, returns false.
3858 
3859  If hAllocation is already lost, assert - you should not call it then.
3860  If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
3861  */
3862  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
3863 
3864  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
3865  {
3866  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
3867  outInfo.blockCount = 1;
3868  outInfo.allocationCount = 1;
3869  outInfo.unusedRangeCount = 0;
3870  outInfo.usedBytes = m_Size;
3871  outInfo.unusedBytes = 0;
3872  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
3873  outInfo.unusedRangeSizeMin = UINT64_MAX;
3874  outInfo.unusedRangeSizeMax = 0;
3875  }
3876 
3877  void BlockAllocMap();
3878  void BlockAllocUnmap();
3879  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
3880  void DedicatedAllocUnmap(VmaAllocator hAllocator);
3881 
3882 private:
3883  VkDeviceSize m_Alignment;
3884  VkDeviceSize m_Size;
3885  void* m_pUserData;
3886  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
3887  uint8_t m_Type; // ALLOCATION_TYPE
3888  uint8_t m_SuballocationType; // VmaSuballocationType
3889  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
3890  // Bits with mask 0x7F form the reference counter for vmaMapMemory()/vmaUnmapMemory().
3891  uint8_t m_MapCount;
3892  uint8_t m_Flags; // enum FLAGS
3893 
3894  // Allocation out of VmaDeviceMemoryBlock.
3895  struct BlockAllocation
3896  {
3897  VmaPool m_hPool; // Null if the allocation belongs to general memory, not a custom pool.
3898  VmaDeviceMemoryBlock* m_Block;
3899  VkDeviceSize m_Offset;
3900  bool m_CanBecomeLost;
3901  };
3902 
3903  // Allocation for an object that has its own private VkDeviceMemory.
3904  struct DedicatedAllocation
3905  {
3906  uint32_t m_MemoryTypeIndex;
3907  VkDeviceMemory m_hMemory;
3908  void* m_pMappedData; // Not null means memory is mapped.
3909  };
3910 
3911  union
3912  {
3913  // Allocation out of VmaDeviceMemoryBlock.
3914  BlockAllocation m_BlockAllocation;
3915  // Allocation for an object that has its own private VkDeviceMemory.
3916  DedicatedAllocation m_DedicatedAllocation;
3917  };
3918 
3919  void FreeUserDataString(VmaAllocator hAllocator);
3920 };
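
// Illustrative sketch, not part of the library: how the m_MapCount byte above
// decomposes into the persistent-map flag and the map reference counter. The raw
// value below is hypothetical.
//
//   uint8_t mapCount = 0x82;                      // example: persistent flag + 2 user mappings
//   bool persistent = (mapCount & 0x80) != 0;     // MAP_COUNT_FLAG_PERSISTENT_MAP -> true
//   uint32_t userMapRefs = mapCount & 0x7F;       // vmaMapMemory() nesting depth -> 2
//
// BlockAllocMap()/BlockAllocUnmap() defined further below increment/decrement only
// the low 7 bits, so an allocation can be mapped at most 0x7F times simultaneously.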
3921 
3922 /*
3923 Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
3924 an allocated memory block, or free.
3925 */
3926 struct VmaSuballocation
3927 {
3928  VkDeviceSize offset;
3929  VkDeviceSize size;
3930  VmaAllocation hAllocation;
3931  VmaSuballocationType type;
3932 };
3933 
3934 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
3935 
3936 // Cost of making one additional allocation lost, expressed as an equivalent number of bytes.
3937 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
3938 
3939 /*
3940 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
3941 
3942 If canMakeOtherLost was false:
3943 - item points to a FREE suballocation.
3944 - itemsToMakeLostCount is 0.
3945 
3946 If canMakeOtherLost was true:
3947 - item points to the first of a sequence of suballocations, each of which is either FREE
3948  or points to a VmaAllocation that can become lost.
3949 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
3950  the requested allocation to succeed.
3951 */
3952 struct VmaAllocationRequest
3953 {
3954  VkDeviceSize offset;
3955  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
3956  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
3957  VmaSuballocationList::iterator item;
3958  size_t itemsToMakeLostCount;
3959 
3960  VkDeviceSize CalcCost() const
3961  {
3962  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
3963  }
3964 };
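
// Worked example for CalcCost(), with made-up numbers: a candidate spot that would
// destroy itemsToMakeLostCount = 2 lost-able allocations whose sizes sum to
// sumItemSize = 3 MiB costs
//
//   3145728 + 2 * VMA_LOST_ALLOCATION_COST = 3145728 + 2097152 = 5242880
//
// bytes-equivalent. CreateAllocationRequest() keeps the candidate with the lowest cost.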
3965 
3966 /*
3967 Data structure used for bookkeeping of allocations and unused ranges of memory
3968 in a single VkDeviceMemory block.
3969 */
3970 class VmaBlockMetadata
3971 {
3972 public:
3973  VmaBlockMetadata(VmaAllocator hAllocator);
3974  ~VmaBlockMetadata();
3975  void Init(VkDeviceSize size);
3976 
3977  // Validates all data structures inside this object. If not valid, returns false.
3978  bool Validate() const;
3979  VkDeviceSize GetSize() const { return m_Size; }
3980  size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
3981  VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
3982  VkDeviceSize GetUnusedRangeSizeMax() const;
3983  // Returns true if this block is empty - contains only a single free suballocation.
3984  bool IsEmpty() const;
3985 
3986  void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
3987  void AddPoolStats(VmaPoolStats& inoutStats) const;
3988 
3989 #if VMA_STATS_STRING_ENABLED
3990  void PrintDetailedMap(class VmaJsonWriter& json) const;
3991 #endif
3992 
3993  // Creates a trivial request for the case when the block is empty.
3994  void CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest);
3995 
3996  // Tries to find a place for a suballocation with the given parameters inside this block.
3997  // On success, fills pAllocationRequest and returns true.
3998  // On failure, returns false.
3999  bool CreateAllocationRequest(
4000  uint32_t currentFrameIndex,
4001  uint32_t frameInUseCount,
4002  VkDeviceSize bufferImageGranularity,
4003  VkDeviceSize allocSize,
4004  VkDeviceSize allocAlignment,
4005  VmaSuballocationType allocType,
4006  bool canMakeOtherLost,
4007  VmaAllocationRequest* pAllocationRequest);
4008 
4009  bool MakeRequestedAllocationsLost(
4010  uint32_t currentFrameIndex,
4011  uint32_t frameInUseCount,
4012  VmaAllocationRequest* pAllocationRequest);
4013 
4014  uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
4015 
4016  // Makes actual allocation based on request. Request must already be checked and valid.
4017  void Alloc(
4018  const VmaAllocationRequest& request,
4019  VmaSuballocationType type,
4020  VkDeviceSize allocSize,
4021  VmaAllocation hAllocation);
4022 
4023  // Frees suballocation assigned to given memory region.
4024  void Free(const VmaAllocation allocation);
4025  void FreeAtOffset(VkDeviceSize offset);
4026 
4027 private:
4028  VkDeviceSize m_Size;
4029  uint32_t m_FreeCount;
4030  VkDeviceSize m_SumFreeSize;
4031  VmaSuballocationList m_Suballocations;
4032  // Suballocations that are free and have size greater than certain threshold.
4033  // Sorted by size, ascending.
4034  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
4035 
4036  bool ValidateFreeSuballocationList() const;
4037 
4038  // Checks if a requested suballocation with the given parameters can be placed at the given suballocItem.
4039  // If yes, fills pOffset and returns true. If no, returns false.
4040  bool CheckAllocation(
4041  uint32_t currentFrameIndex,
4042  uint32_t frameInUseCount,
4043  VkDeviceSize bufferImageGranularity,
4044  VkDeviceSize allocSize,
4045  VkDeviceSize allocAlignment,
4046  VmaSuballocationType allocType,
4047  VmaSuballocationList::const_iterator suballocItem,
4048  bool canMakeOtherLost,
4049  VkDeviceSize* pOffset,
4050  size_t* itemsToMakeLostCount,
4051  VkDeviceSize* pSumFreeSize,
4052  VkDeviceSize* pSumItemSize) const;
4053  // Merges the given free suballocation with the following one, which must also be free.
4054  void MergeFreeWithNext(VmaSuballocationList::iterator item);
4055  // Releases given suballocation, making it free.
4056  // Merges it with adjacent free suballocations if applicable.
4057  // Returns iterator to new free suballocation at this place.
4058  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
4059  // Inserts the given free suballocation into the sorted list
4060  // m_FreeSuballocationsBySize, if it is large enough to be registered there.
4061  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
4062  // Removes the given free suballocation from the sorted list
4063  // m_FreeSuballocationsBySize, if it was registered there.
4064  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
4065 };
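
// Sketch of the invariants Validate() checks, on a hypothetical 1024-byte block:
//
//   offset   size   type            hAllocation
//   0        256    BUFFER          a1     // used
//   256      128    FREE            null   // free; two adjacent FREE ranges are invalid (must be merged)
//   384      640    IMAGE_OPTIMAL   a2     // used; offsets are contiguous: 256 + 128 == 384
//
// The sizes sum to GetSize(), and every FREE range of size >=
// VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER also appears in
// m_FreeSuballocationsBySize, sorted by size ascending.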
4066 
4067 /*
4068 Represents a single block of device memory (`VkDeviceMemory`) with all the
4069 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
4070 
4071 Thread-safety: This class must be externally synchronized.
4072 */
4073 class VmaDeviceMemoryBlock
4074 {
4075 public:
4076  VmaBlockMetadata m_Metadata;
4077 
4078  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
4079 
4080  ~VmaDeviceMemoryBlock()
4081  {
4082  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
4083  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
4084  }
4085 
4086  // Always call after construction.
4087  void Init(
4088  uint32_t newMemoryTypeIndex,
4089  VkDeviceMemory newMemory,
4090  VkDeviceSize newSize);
4091  // Always call before destruction.
4092  void Destroy(VmaAllocator allocator);
4093 
4094  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
4095  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
4096  void* GetMappedData() const { return m_pMappedData; }
4097 
4098  // Validates all data structures inside this object. If not valid, returns false.
4099  bool Validate() const;
4100 
4101  // ppData can be null.
4102  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
4103  void Unmap(VmaAllocator hAllocator, uint32_t count);
4104 
4105  VkResult BindBufferMemory(
4106  const VmaAllocator hAllocator,
4107  const VmaAllocation hAllocation,
4108  VkBuffer hBuffer);
4109  VkResult BindImageMemory(
4110  const VmaAllocator hAllocator,
4111  const VmaAllocation hAllocation,
4112  VkImage hImage);
4113 
4114 private:
4115  uint32_t m_MemoryTypeIndex;
4116  VkDeviceMemory m_hMemory;
4117 
4118  // Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
4119  // Also protects m_MapCount, m_pMappedData.
4120  VMA_MUTEX m_Mutex;
4121  uint32_t m_MapCount;
4122  void* m_pMappedData;
4123 };
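
// Usage sketch, hypothetical calling code: Map()/Unmap() above are reference-counted,
// so nested mappings of one VkDeviceMemory are cheap and safe:
//
//   void* p1; block.Map(hAllocator, 1, &p1);   // m_MapCount 0 -> 1: calls vkMapMemory
//   void* p2; block.Map(hAllocator, 1, &p2);   // m_MapCount 1 -> 2: reuses m_pMappedData
//   block.Unmap(hAllocator, 1);                // 2 -> 1: memory stays mapped
//   block.Unmap(hAllocator, 1);                // 1 -> 0: calls vkUnmapMemory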
4124 
4125 struct VmaPointerLess
4126 {
4127  bool operator()(const void* lhs, const void* rhs) const
4128  {
4129  return lhs < rhs;
4130  }
4131 };
4132 
4133 class VmaDefragmentator;
4134 
4135 /*
4136 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
4137 Vulkan memory type.
4138 
4139 Synchronized internally with a mutex.
4140 */
4141 struct VmaBlockVector
4142 {
4143  VmaBlockVector(
4144  VmaAllocator hAllocator,
4145  uint32_t memoryTypeIndex,
4146  VkDeviceSize preferredBlockSize,
4147  size_t minBlockCount,
4148  size_t maxBlockCount,
4149  VkDeviceSize bufferImageGranularity,
4150  uint32_t frameInUseCount,
4151  bool isCustomPool);
4152  ~VmaBlockVector();
4153 
4154  VkResult CreateMinBlocks();
4155 
4156  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
4157  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
4158  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
4159  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
4160 
4161  void GetPoolStats(VmaPoolStats* pStats);
4162 
4163  bool IsEmpty() const { return m_Blocks.empty(); }
4164 
4165  VkResult Allocate(
4166  VmaPool hCurrentPool,
4167  uint32_t currentFrameIndex,
4168  const VkMemoryRequirements& vkMemReq,
4169  const VmaAllocationCreateInfo& createInfo,
4170  VmaSuballocationType suballocType,
4171  VmaAllocation* pAllocation);
4172 
4173  void Free(
4174  VmaAllocation hAllocation);
4175 
4176  // Adds statistics of this BlockVector to pStats.
4177  void AddStats(VmaStats* pStats);
4178 
4179 #if VMA_STATS_STRING_ENABLED
4180  void PrintDetailedMap(class VmaJsonWriter& json);
4181 #endif
4182 
4183  void MakePoolAllocationsLost(
4184  uint32_t currentFrameIndex,
4185  size_t* pLostAllocationCount);
4186 
4187  VmaDefragmentator* EnsureDefragmentator(
4188  VmaAllocator hAllocator,
4189  uint32_t currentFrameIndex);
4190 
4191  VkResult Defragment(
4192  VmaDefragmentationStats* pDefragmentationStats,
4193  VkDeviceSize& maxBytesToMove,
4194  uint32_t& maxAllocationsToMove);
4195 
4196  void DestroyDefragmentator();
4197 
4198 private:
4199  friend class VmaDefragmentator;
4200 
4201  const VmaAllocator m_hAllocator;
4202  const uint32_t m_MemoryTypeIndex;
4203  const VkDeviceSize m_PreferredBlockSize;
4204  const size_t m_MinBlockCount;
4205  const size_t m_MaxBlockCount;
4206  const VkDeviceSize m_BufferImageGranularity;
4207  const uint32_t m_FrameInUseCount;
4208  const bool m_IsCustomPool;
4209  VMA_MUTEX m_Mutex;
4210  // Incrementally sorted by sumFreeSize, ascending.
4211  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
4212  /* There can be at most one block that is completely empty - a
4213  hysteresis to avoid the pessimistic case of alternating creation and destruction
4214  of a VkDeviceMemory. */
4215  bool m_HasEmptyBlock;
4216  VmaDefragmentator* m_pDefragmentator;
4217 
4218  size_t CalcMaxBlockSize() const;
4219 
4220  // Finds and removes given block from vector.
4221  void Remove(VmaDeviceMemoryBlock* pBlock);
4222 
4223  // Performs a single step in sorting m_Blocks. They may not be fully sorted
4224  // after this call.
4225  void IncrementallySortBlocks();
4226 
4227  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
4228 };
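
// The m_HasEmptyBlock hysteresis above, sketched: when the last allocation in a block
// is freed, its VkDeviceMemory is kept alive as long as it is the only empty block.
// Without it, a pattern like (pseudocode)
//
//   for(;;) { a = Allocate(...); Free(a); }
//
// could call vkAllocateMemory/vkFreeMemory on every iteration; with it, the single
// empty block is simply reused.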
4229 
4230 struct VmaPool_T
4231 {
4232 public:
4233  VmaBlockVector m_BlockVector;
4234 
4235  // Takes ownership.
4236  VmaPool_T(
4237  VmaAllocator hAllocator,
4238  const VmaPoolCreateInfo& createInfo);
4239  ~VmaPool_T();
4240 
4241  VmaBlockVector& GetBlockVector() { return m_BlockVector; }
4242 
4243 #if VMA_STATS_STRING_ENABLED
4244  //void PrintDetailedMap(class VmaStringBuilder& sb);
4245 #endif
4246 };
4247 
4248 class VmaDefragmentator
4249 {
4250  const VmaAllocator m_hAllocator;
4251  VmaBlockVector* const m_pBlockVector;
4252  uint32_t m_CurrentFrameIndex;
4253  VkDeviceSize m_BytesMoved;
4254  uint32_t m_AllocationsMoved;
4255 
4256  struct AllocationInfo
4257  {
4258  VmaAllocation m_hAllocation;
4259  VkBool32* m_pChanged;
4260 
4261  AllocationInfo() :
4262  m_hAllocation(VK_NULL_HANDLE),
4263  m_pChanged(VMA_NULL)
4264  {
4265  }
4266  };
4267 
4268  struct AllocationInfoSizeGreater
4269  {
4270  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
4271  {
4272  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
4273  }
4274  };
4275 
4276  // Used between AddAllocation and Defragment.
4277  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
4278 
4279  struct BlockInfo
4280  {
4281  VmaDeviceMemoryBlock* m_pBlock;
4282  bool m_HasNonMovableAllocations;
4283  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
4284 
4285  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
4286  m_pBlock(VMA_NULL),
4287  m_HasNonMovableAllocations(true),
4288  m_Allocations(pAllocationCallbacks),
4289  m_pMappedDataForDefragmentation(VMA_NULL)
4290  {
4291  }
4292 
4293  void CalcHasNonMovableAllocations()
4294  {
4295  const size_t blockAllocCount = m_pBlock->m_Metadata.GetAllocationCount();
4296  const size_t defragmentAllocCount = m_Allocations.size();
4297  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
4298  }
4299 
4300  void SortAllocationsBySizeDescecnding()
4301  {
4302  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
4303  }
4304 
4305  VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
4306  void Unmap(VmaAllocator hAllocator);
4307 
4308  private:
4309  // Not null if mapped for defragmentation only, not originally mapped.
4310  void* m_pMappedDataForDefragmentation;
4311  };
4312 
4313  struct BlockPointerLess
4314  {
4315  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
4316  {
4317  return pLhsBlockInfo->m_pBlock < pRhsBlock;
4318  }
4319  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
4320  {
4321  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
4322  }
4323  };
4324 
4325  // 1. Blocks with some non-movable allocations go first.
4326  // 2. Blocks with smaller sumFreeSize go first.
4327  struct BlockInfoCompareMoveDestination
4328  {
4329  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
4330  {
4331  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
4332  {
4333  return true;
4334  }
4335  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
4336  {
4337  return false;
4338  }
4339  if(pLhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize())
4340  {
4341  return true;
4342  }
4343  return false;
4344  }
4345  };
4346 
4347  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
4348  BlockInfoVector m_Blocks;
4349 
4350  VkResult DefragmentRound(
4351  VkDeviceSize maxBytesToMove,
4352  uint32_t maxAllocationsToMove);
4353 
4354  static bool MoveMakesSense(
4355  size_t dstBlockIndex, VkDeviceSize dstOffset,
4356  size_t srcBlockIndex, VkDeviceSize srcOffset);
4357 
4358 public:
4359  VmaDefragmentator(
4360  VmaAllocator hAllocator,
4361  VmaBlockVector* pBlockVector,
4362  uint32_t currentFrameIndex);
4363 
4364  ~VmaDefragmentator();
4365 
4366  VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
4367  uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
4368 
4369  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
4370 
4371  VkResult Defragment(
4372  VkDeviceSize maxBytesToMove,
4373  uint32_t maxAllocationsToMove);
4374 };
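
// Internal usage sketch, hypothetical calling code: the allocator collects movable
// allocations into the defragmentator and runs it under the caller's limits:
//
//   VmaDefragmentator* pDefrag = blockVector.EnsureDefragmentator(hAllocator, frameIndex);
//   pDefrag->AddAllocation(hAlloc, &allocChanged);            // mark candidate to move
//   VkResult res = pDefrag->Defragment(maxBytesToMove, maxAllocationsToMove);
//   // GetBytesMoved() / GetAllocationsMoved() then feed VmaDefragmentationStats.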
4375 
4376 // Main allocator object.
4377 struct VmaAllocator_T
4378 {
4379  bool m_UseMutex;
4380  bool m_UseKhrDedicatedAllocation;
4381  VkDevice m_hDevice;
4382  bool m_AllocationCallbacksSpecified;
4383  VkAllocationCallbacks m_AllocationCallbacks;
4384  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
4385 
4386  // Number of bytes free out of the limit, or VK_WHOLE_SIZE if there is no limit for that heap.
4387  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
4388  VMA_MUTEX m_HeapSizeLimitMutex;
4389 
4390  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
4391  VkPhysicalDeviceMemoryProperties m_MemProps;
4392 
4393  // Default pools.
4394  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
4395 
4396  // Each vector is sorted by memory (handle value).
4397  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
4398  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
4399  VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
4400 
4401  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
4402  ~VmaAllocator_T();
4403 
4404  const VkAllocationCallbacks* GetAllocationCallbacks() const
4405  {
4406  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
4407  }
4408  const VmaVulkanFunctions& GetVulkanFunctions() const
4409  {
4410  return m_VulkanFunctions;
4411  }
4412 
4413  VkDeviceSize GetBufferImageGranularity() const
4414  {
4415  return VMA_MAX(
4416  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
4417  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
4418  }
4419 
4420  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
4421  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
4422 
4423  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
4424  {
4425  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
4426  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
4427  }
4428 
4429  void GetBufferMemoryRequirements(
4430  VkBuffer hBuffer,
4431  VkMemoryRequirements& memReq,
4432  bool& requiresDedicatedAllocation,
4433  bool& prefersDedicatedAllocation) const;
4434  void GetImageMemoryRequirements(
4435  VkImage hImage,
4436  VkMemoryRequirements& memReq,
4437  bool& requiresDedicatedAllocation,
4438  bool& prefersDedicatedAllocation) const;
4439 
4440  // Main allocation function.
4441  VkResult AllocateMemory(
4442  const VkMemoryRequirements& vkMemReq,
4443  bool requiresDedicatedAllocation,
4444  bool prefersDedicatedAllocation,
4445  VkBuffer dedicatedBuffer,
4446  VkImage dedicatedImage,
4447  const VmaAllocationCreateInfo& createInfo,
4448  VmaSuballocationType suballocType,
4449  VmaAllocation* pAllocation);
4450 
4451  // Main deallocation function.
4452  void FreeMemory(const VmaAllocation allocation);
4453 
4454  void CalculateStats(VmaStats* pStats);
4455 
4456 #if VMA_STATS_STRING_ENABLED
4457  void PrintDetailedMap(class VmaJsonWriter& json);
4458 #endif
4459 
4460  VkResult Defragment(
4461  VmaAllocation* pAllocations,
4462  size_t allocationCount,
4463  VkBool32* pAllocationsChanged,
4464  const VmaDefragmentationInfo* pDefragmentationInfo,
4465  VmaDefragmentationStats* pDefragmentationStats);
4466 
4467  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
4468  bool TouchAllocation(VmaAllocation hAllocation);
4469 
4470  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
4471  void DestroyPool(VmaPool pool);
4472  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
4473 
4474  void SetCurrentFrameIndex(uint32_t frameIndex);
4475 
4476  void MakePoolAllocationsLost(
4477  VmaPool hPool,
4478  size_t* pLostAllocationCount);
4479 
4480  void CreateLostAllocation(VmaAllocation* pAllocation);
4481 
4482  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
4483  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
4484 
4485  VkResult Map(VmaAllocation hAllocation, void** ppData);
4486  void Unmap(VmaAllocation hAllocation);
4487 
4488  VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
4489  VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);
4490 
4491 private:
4492  VkDeviceSize m_PreferredLargeHeapBlockSize;
4493 
4494  VkPhysicalDevice m_PhysicalDevice;
4495  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
4496 
4497  VMA_MUTEX m_PoolsMutex;
4498  // Protected by m_PoolsMutex. Sorted by pointer value.
4499  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
4500 
4501  VmaVulkanFunctions m_VulkanFunctions;
4502 
4503  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
4504 
4505  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
4506 
4507  VkResult AllocateMemoryOfType(
4508  const VkMemoryRequirements& vkMemReq,
4509  bool dedicatedAllocation,
4510  VkBuffer dedicatedBuffer,
4511  VkImage dedicatedImage,
4512  const VmaAllocationCreateInfo& createInfo,
4513  uint32_t memTypeIndex,
4514  VmaSuballocationType suballocType,
4515  VmaAllocation* pAllocation);
4516 
4517  // Allocates and registers new VkDeviceMemory specifically for a single allocation.
4518  VkResult AllocateDedicatedMemory(
4519  VkDeviceSize size,
4520  VmaSuballocationType suballocType,
4521  uint32_t memTypeIndex,
4522  bool map,
4523  bool isUserDataString,
4524  void* pUserData,
4525  VkBuffer dedicatedBuffer,
4526  VkImage dedicatedImage,
4527  VmaAllocation* pAllocation);
4528 
4529  // Frees the given allocation made as dedicated: unregisters it and releases its VkDeviceMemory.
4530  void FreeDedicatedMemory(VmaAllocation allocation);
4531 };
4532 
4534 // Memory allocation #2 after VmaAllocator_T definition
4535 
4536 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
4537 {
4538  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
4539 }
4540 
4541 static void VmaFree(VmaAllocator hAllocator, void* ptr)
4542 {
4543  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
4544 }
4545 
4546 template<typename T>
4547 static T* VmaAllocate(VmaAllocator hAllocator)
4548 {
4549  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
4550 }
4551 
4552 template<typename T>
4553 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
4554 {
4555  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
4556 }
4557 
4558 template<typename T>
4559 static void vma_delete(VmaAllocator hAllocator, T* ptr)
4560 {
4561  if(ptr != VMA_NULL)
4562  {
4563  ptr->~T();
4564  VmaFree(hAllocator, ptr);
4565  }
4566 }
4567 
4568 template<typename T>
4569 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
4570 {
4571  if(ptr != VMA_NULL)
4572  {
4573  for(size_t i = count; i--; )
4574  ptr[i].~T();
4575  VmaFree(hAllocator, ptr);
4576  }
4577 }
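
// Usage sketch for the helpers above, with a hypothetical element type: allocation and
// deletion must go through the same VmaAllocator so custom VkAllocationCallbacks apply:
//
//   VmaSuballocation* arr = VmaAllocateArray<VmaSuballocation>(hAllocator, 16);
//   // ... construct (e.g. placement-new) and use arr[0..15]; VmaAllocateArray does not construct ...
//   vma_delete_array(hAllocator, arr, 16);   // runs ~T() on each element, then VmaFree()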
4578 
4580 // VmaStringBuilder
4581 
4582 #if VMA_STATS_STRING_ENABLED
4583 
4584 class VmaStringBuilder
4585 {
4586 public:
4587  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
4588  size_t GetLength() const { return m_Data.size(); }
4589  const char* GetData() const { return m_Data.data(); }
4590 
4591  void Add(char ch) { m_Data.push_back(ch); }
4592  void Add(const char* pStr);
4593  void AddNewLine() { Add('\n'); }
4594  void AddNumber(uint32_t num);
4595  void AddNumber(uint64_t num);
4596  void AddPointer(const void* ptr);
4597 
4598 private:
4599  VmaVector< char, VmaStlAllocator<char> > m_Data;
4600 };
4601 
4602 void VmaStringBuilder::Add(const char* pStr)
4603 {
4604  const size_t strLen = strlen(pStr);
4605  if(strLen > 0)
4606  {
4607  const size_t oldCount = m_Data.size();
4608  m_Data.resize(oldCount + strLen);
4609  memcpy(m_Data.data() + oldCount, pStr, strLen);
4610  }
4611 }
4612 
4613 void VmaStringBuilder::AddNumber(uint32_t num)
4614 {
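 // 11 = at most 10 decimal digits of UINT32_MAX (4294967295) + terminating '\0'.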
4615  char buf[11];
4616  VmaUint32ToStr(buf, sizeof(buf), num);
4617  Add(buf);
4618 }
4619 
4620 void VmaStringBuilder::AddNumber(uint64_t num)
4621 {
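 // 21 = at most 20 decimal digits of UINT64_MAX (18446744073709551615) + terminating '\0'.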
4622  char buf[21];
4623  VmaUint64ToStr(buf, sizeof(buf), num);
4624  Add(buf);
4625 }
4626 
4627 void VmaStringBuilder::AddPointer(const void* ptr)
4628 {
4629  char buf[21];
4630  VmaPtrToStr(buf, sizeof(buf), ptr);
4631  Add(buf);
4632 }
4633 
4634 #endif // #if VMA_STATS_STRING_ENABLED
4635 
4637 // VmaJsonWriter
4638 
4639 #if VMA_STATS_STRING_ENABLED
4640 
4641 class VmaJsonWriter
4642 {
4643 public:
4644  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
4645  ~VmaJsonWriter();
4646 
4647  void BeginObject(bool singleLine = false);
4648  void EndObject();
4649 
4650  void BeginArray(bool singleLine = false);
4651  void EndArray();
4652 
4653  void WriteString(const char* pStr);
4654  void BeginString(const char* pStr = VMA_NULL);
4655  void ContinueString(const char* pStr);
4656  void ContinueString(uint32_t n);
4657  void ContinueString(uint64_t n);
4658  void ContinueString_Pointer(const void* ptr);
4659  void EndString(const char* pStr = VMA_NULL);
4660 
4661  void WriteNumber(uint32_t n);
4662  void WriteNumber(uint64_t n);
4663  void WriteBool(bool b);
4664  void WriteNull();
4665 
4666 private:
4667  static const char* const INDENT;
4668 
4669  enum COLLECTION_TYPE
4670  {
4671  COLLECTION_TYPE_OBJECT,
4672  COLLECTION_TYPE_ARRAY,
4673  };
4674  struct StackItem
4675  {
4676  COLLECTION_TYPE type;
4677  uint32_t valueCount;
4678  bool singleLineMode;
4679  };
4680 
4681  VmaStringBuilder& m_SB;
4682  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
4683  bool m_InsideString;
4684 
4685  void BeginValue(bool isString);
4686  void WriteIndent(bool oneLess = false);
4687 };
4688 
4689 const char* const VmaJsonWriter::INDENT = " ";
4690 
4691 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
4692  m_SB(sb),
4693  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
4694  m_InsideString(false)
4695 {
4696 }
4697 
4698 VmaJsonWriter::~VmaJsonWriter()
4699 {
4700  VMA_ASSERT(!m_InsideString);
4701  VMA_ASSERT(m_Stack.empty());
4702 }
4703 
4704 void VmaJsonWriter::BeginObject(bool singleLine)
4705 {
4706  VMA_ASSERT(!m_InsideString);
4707 
4708  BeginValue(false);
4709  m_SB.Add('{');
4710 
4711  StackItem item;
4712  item.type = COLLECTION_TYPE_OBJECT;
4713  item.valueCount = 0;
4714  item.singleLineMode = singleLine;
4715  m_Stack.push_back(item);
4716 }
4717 
4718 void VmaJsonWriter::EndObject()
4719 {
4720  VMA_ASSERT(!m_InsideString);
4721 
4722  WriteIndent(true);
4723  m_SB.Add('}');
4724 
4725  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
4726  m_Stack.pop_back();
4727 }
4728 
4729 void VmaJsonWriter::BeginArray(bool singleLine)
4730 {
4731  VMA_ASSERT(!m_InsideString);
4732 
4733  BeginValue(false);
4734  m_SB.Add('[');
4735 
4736  StackItem item;
4737  item.type = COLLECTION_TYPE_ARRAY;
4738  item.valueCount = 0;
4739  item.singleLineMode = singleLine;
4740  m_Stack.push_back(item);
4741 }
4742 
4743 void VmaJsonWriter::EndArray()
4744 {
4745  VMA_ASSERT(!m_InsideString);
4746 
4747  WriteIndent(true);
4748  m_SB.Add(']');
4749 
4750  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
4751  m_Stack.pop_back();
4752 }
4753 
4754 void VmaJsonWriter::WriteString(const char* pStr)
4755 {
4756  BeginString(pStr);
4757  EndString();
4758 }
4759 
4760 void VmaJsonWriter::BeginString(const char* pStr)
4761 {
4762  VMA_ASSERT(!m_InsideString);
4763 
4764  BeginValue(true);
4765  m_SB.Add('"');
4766  m_InsideString = true;
4767  if(pStr != VMA_NULL && pStr[0] != '\0')
4768  {
4769  ContinueString(pStr);
4770  }
4771 }
4772 
4773 void VmaJsonWriter::ContinueString(const char* pStr)
4774 {
4775  VMA_ASSERT(m_InsideString);
4776 
4777  const size_t strLen = strlen(pStr);
4778  for(size_t i = 0; i < strLen; ++i)
4779  {
4780  char ch = pStr[i];
4781  if(ch == '\\') // Backslash must be escaped in JSON; a single quote needs no escaping.
4782  {
4783  m_SB.Add("\\\\");
4784  }
4785  else if(ch == '"')
4786  {
4787  m_SB.Add("\\\"");
4788  }
4789  else if(ch >= 32)
4790  {
4791  m_SB.Add(ch);
4792  }
4793  else switch(ch)
4794  {
4795  case '\b':
4796  m_SB.Add("\\b");
4797  break;
4798  case '\f':
4799  m_SB.Add("\\f");
4800  break;
4801  case '\n':
4802  m_SB.Add("\\n");
4803  break;
4804  case '\r':
4805  m_SB.Add("\\r");
4806  break;
4807  case '\t':
4808  m_SB.Add("\\t");
4809  break;
4810  default:
4811  VMA_ASSERT(0 && "Character not currently supported.");
4812  break;
4813  }
4814  }
4815 }
4816 
4817 void VmaJsonWriter::ContinueString(uint32_t n)
4818 {
4819  VMA_ASSERT(m_InsideString);
4820  m_SB.AddNumber(n);
4821 }
4822 
4823 void VmaJsonWriter::ContinueString(uint64_t n)
4824 {
4825  VMA_ASSERT(m_InsideString);
4826  m_SB.AddNumber(n);
4827 }
4828 
4829 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
4830 {
4831  VMA_ASSERT(m_InsideString);
4832  m_SB.AddPointer(ptr);
4833 }
4834 
4835 void VmaJsonWriter::EndString(const char* pStr)
4836 {
4837  VMA_ASSERT(m_InsideString);
4838  if(pStr != VMA_NULL && pStr[0] != '\0')
4839  {
4840  ContinueString(pStr);
4841  }
4842  m_SB.Add('"');
4843  m_InsideString = false;
4844 }
4845 
4846 void VmaJsonWriter::WriteNumber(uint32_t n)
4847 {
4848  VMA_ASSERT(!m_InsideString);
4849  BeginValue(false);
4850  m_SB.AddNumber(n);
4851 }
4852 
4853 void VmaJsonWriter::WriteNumber(uint64_t n)
4854 {
4855  VMA_ASSERT(!m_InsideString);
4856  BeginValue(false);
4857  m_SB.AddNumber(n);
4858 }
4859 
4860 void VmaJsonWriter::WriteBool(bool b)
4861 {
4862  VMA_ASSERT(!m_InsideString);
4863  BeginValue(false);
4864  m_SB.Add(b ? "true" : "false");
4865 }
4866 
4867 void VmaJsonWriter::WriteNull()
4868 {
4869  VMA_ASSERT(!m_InsideString);
4870  BeginValue(false);
4871  m_SB.Add("null");
4872 }
4873 
4874 void VmaJsonWriter::BeginValue(bool isString)
4875 {
4876  if(!m_Stack.empty())
4877  {
4878  StackItem& currItem = m_Stack.back();
4879  if(currItem.type == COLLECTION_TYPE_OBJECT &&
4880  currItem.valueCount % 2 == 0)
4881  {
4882  VMA_ASSERT(isString);
4883  }
4884 
4885  if(currItem.type == COLLECTION_TYPE_OBJECT &&
4886  currItem.valueCount % 2 != 0)
4887  {
4888  m_SB.Add(": ");
4889  }
4890  else if(currItem.valueCount > 0)
4891  {
4892  m_SB.Add(", ");
4893  WriteIndent();
4894  }
4895  else
4896  {
4897  WriteIndent();
4898  }
4899  ++currItem.valueCount;
4900  }
4901 }
4902 
4903 void VmaJsonWriter::WriteIndent(bool oneLess)
4904 {
4905  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
4906  {
4907  m_SB.AddNewLine();
4908 
4909  size_t count = m_Stack.size();
4910  if(count > 0 && oneLess)
4911  {
4912  --count;
4913  }
4914  for(size_t i = 0; i < count; ++i)
4915  {
4916  m_SB.Add(INDENT);
4917  }
4918  }
4919 }
4920 
4921 #endif // #if VMA_STATS_STRING_ENABLED
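
// Usage sketch for the JSON writer above, hypothetical calling code:
//
//   VmaStringBuilder sb(hAllocator);
//   VmaJsonWriter json(hAllocator->GetAllocationCallbacks(), sb);
//   json.BeginObject();
//   json.WriteString("UsedBytes");   // inside an object, every even-indexed value must be a string key
//   json.WriteNumber(1024u);         // the odd-indexed value that follows the key
//   json.EndObject();
//   // sb.GetData() now holds, modulo indentation: { "UsedBytes": 1024 }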
4922 
4924 
4925 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
4926 {
4927  if(IsUserDataString())
4928  {
4929  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
4930 
4931  FreeUserDataString(hAllocator);
4932 
4933  if(pUserData != VMA_NULL)
4934  {
4935  const char* const newStrSrc = (char*)pUserData;
4936  const size_t newStrLen = strlen(newStrSrc);
4937  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
4938  memcpy(newStrDst, newStrSrc, newStrLen + 1);
4939  m_pUserData = newStrDst;
4940  }
4941  }
4942  else
4943  {
4944  m_pUserData = pUserData;
4945  }
4946 }
4947 
4948 void VmaAllocation_T::ChangeBlockAllocation(
4949  VmaAllocator hAllocator,
4950  VmaDeviceMemoryBlock* block,
4951  VkDeviceSize offset)
4952 {
4953  VMA_ASSERT(block != VMA_NULL);
4954  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
4955 
4956  // Move mapping reference counter from old block to new block.
4957  if(block != m_BlockAllocation.m_Block)
4958  {
4959  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
4960  if(IsPersistentMap())
4961  ++mapRefCount;
4962  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
4963  block->Map(hAllocator, mapRefCount, VMA_NULL);
4964  }
4965 
4966  m_BlockAllocation.m_Block = block;
4967  m_BlockAllocation.m_Offset = offset;
4968 }
4969 
4970 VkDeviceSize VmaAllocation_T::GetOffset() const
4971 {
4972  switch(m_Type)
4973  {
4974  case ALLOCATION_TYPE_BLOCK:
4975  return m_BlockAllocation.m_Offset;
4976  case ALLOCATION_TYPE_DEDICATED:
4977  return 0;
4978  default:
4979  VMA_ASSERT(0);
4980  return 0;
4981  }
4982 }
4983 
4984 VkDeviceMemory VmaAllocation_T::GetMemory() const
4985 {
4986  switch(m_Type)
4987  {
4988  case ALLOCATION_TYPE_BLOCK:
4989  return m_BlockAllocation.m_Block->GetDeviceMemory();
4990  case ALLOCATION_TYPE_DEDICATED:
4991  return m_DedicatedAllocation.m_hMemory;
4992  default:
4993  VMA_ASSERT(0);
4994  return VK_NULL_HANDLE;
4995  }
4996 }
4997 
4998 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
4999 {
5000  switch(m_Type)
5001  {
5002  case ALLOCATION_TYPE_BLOCK:
5003  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
5004  case ALLOCATION_TYPE_DEDICATED:
5005  return m_DedicatedAllocation.m_MemoryTypeIndex;
5006  default:
5007  VMA_ASSERT(0);
5008  return UINT32_MAX;
5009  }
5010 }
5011 
5012 void* VmaAllocation_T::GetMappedData() const
5013 {
5014  switch(m_Type)
5015  {
5016  case ALLOCATION_TYPE_BLOCK:
5017  if(m_MapCount != 0)
5018  {
5019  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
5020  VMA_ASSERT(pBlockData != VMA_NULL);
5021  return (char*)pBlockData + m_BlockAllocation.m_Offset;
5022  }
5023  else
5024  {
5025  return VMA_NULL;
5026  }
5027  break;
5028  case ALLOCATION_TYPE_DEDICATED:
5029  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
5030  return m_DedicatedAllocation.m_pMappedData;
5031  default:
5032  VMA_ASSERT(0);
5033  return VMA_NULL;
5034  }
5035 }
5036 
5037 bool VmaAllocation_T::CanBecomeLost() const
5038 {
5039  switch(m_Type)
5040  {
5041  case ALLOCATION_TYPE_BLOCK:
5042  return m_BlockAllocation.m_CanBecomeLost;
5043  case ALLOCATION_TYPE_DEDICATED:
5044  return false;
5045  default:
5046  VMA_ASSERT(0);
5047  return false;
5048  }
5049 }
5050 
5051 VmaPool VmaAllocation_T::GetPool() const
5052 {
5053  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
5054  return m_BlockAllocation.m_hPool;
5055 }
5056 
5057 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
5058 {
5059  VMA_ASSERT(CanBecomeLost());
5060 
5061  /*
5062  Warning: This is a carefully designed algorithm.
5063  Do not modify unless you really know what you're doing :)
5064  */
5065  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
5066  for(;;)
5067  {
5068  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
5069  {
5070  VMA_ASSERT(0);
5071  return false;
5072  }
5073  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
5074  {
5075  return false;
5076  }
5077  else // Last use time earlier than current time.
5078  {
5079  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
5080  {
5081  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
5082  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
5083  return true;
5084  }
5085  }
5086  }
5087 }
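
// Why the loop above retries with compare_exchange: another thread may concurrently
// advance LastUseFrameIndex (e.g. via TouchAllocation). A sketch of the race it
// handles, with hypothetical frame numbers and frameInUseCount = 2:
//
//   thread A: reads localLastUseFrameIndex = 10; currentFrameIndex = 100 -> tries to mark LOST
//   thread B: touches the allocation; LastUseFrameIndex becomes 99
//   thread A: compare_exchange(10 -> LOST) fails and reloads localLastUseFrameIndex = 99;
//             now 99 + 2 >= 100, so MakeLost() correctly returns false.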
5088 
5089 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
5090 {
5091  VMA_ASSERT(IsUserDataString());
5092  if(m_pUserData != VMA_NULL)
5093  {
5094  char* const oldStr = (char*)m_pUserData;
5095  const size_t oldStrLen = strlen(oldStr);
5096  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
5097  m_pUserData = VMA_NULL;
5098  }
5099 }
5100 
5101 void VmaAllocation_T::BlockAllocMap()
5102 {
5103  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
5104 
5105  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
5106  {
5107  ++m_MapCount;
5108  }
5109  else
5110  {
5111  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
5112  }
5113 }
5114 
5115 void VmaAllocation_T::BlockAllocUnmap()
5116 {
5117  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
5118 
5119  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
5120  {
5121  --m_MapCount;
5122  }
5123  else
5124  {
5125  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
5126  }
5127 }
5128 
5129 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
5130 {
5131  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
5132 
5133  if(m_MapCount != 0)
5134  {
5135  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
5136  {
5137  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
5138  *ppData = m_DedicatedAllocation.m_pMappedData;
5139  ++m_MapCount;
5140  return VK_SUCCESS;
5141  }
5142  else
5143  {
5144  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
5145  return VK_ERROR_MEMORY_MAP_FAILED;
5146  }
5147  }
5148  else
5149  {
5150  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
5151  hAllocator->m_hDevice,
5152  m_DedicatedAllocation.m_hMemory,
5153  0, // offset
5154  VK_WHOLE_SIZE,
5155  0, // flags
5156  ppData);
5157  if(result == VK_SUCCESS)
5158  {
5159  m_DedicatedAllocation.m_pMappedData = *ppData;
5160  m_MapCount = 1;
5161  }
5162  return result;
5163  }
5164 }
5165 
5166 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
5167 {
5168  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
5169 
5170  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
5171  {
5172  --m_MapCount;
5173  if(m_MapCount == 0)
5174  {
5175  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
5176  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
5177  hAllocator->m_hDevice,
5178  m_DedicatedAllocation.m_hMemory);
5179  }
5180  }
5181  else
5182  {
5183  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
5184  }
5185 }
5186 
5187 #if VMA_STATS_STRING_ENABLED
5188 
5189 // These names correspond to the values of enum VmaSuballocationType.
5190 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
5191  "FREE",
5192  "UNKNOWN",
5193  "BUFFER",
5194  "IMAGE_UNKNOWN",
5195  "IMAGE_LINEAR",
5196  "IMAGE_OPTIMAL",
5197 };
5198 
5199 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
5200 {
5201  json.BeginObject();
5202 
5203  json.WriteString("Blocks");
5204  json.WriteNumber(stat.blockCount);
5205 
5206  json.WriteString("Allocations");
5207  json.WriteNumber(stat.allocationCount);
5208 
5209  json.WriteString("UnusedRanges");
5210  json.WriteNumber(stat.unusedRangeCount);
5211 
5212  json.WriteString("UsedBytes");
5213  json.WriteNumber(stat.usedBytes);
5214 
5215  json.WriteString("UnusedBytes");
5216  json.WriteNumber(stat.unusedBytes);
5217 
5218  if(stat.allocationCount > 1)
5219  {
5220  json.WriteString("AllocationSize");
5221  json.BeginObject(true);
5222  json.WriteString("Min");
5223  json.WriteNumber(stat.allocationSizeMin);
5224  json.WriteString("Avg");
5225  json.WriteNumber(stat.allocationSizeAvg);
5226  json.WriteString("Max");
5227  json.WriteNumber(stat.allocationSizeMax);
5228  json.EndObject();
5229  }
5230 
5231  if(stat.unusedRangeCount > 1)
5232  {
5233  json.WriteString("UnusedRangeSize");
5234  json.BeginObject(true);
5235  json.WriteString("Min");
5236  json.WriteNumber(stat.unusedRangeSizeMin);
5237  json.WriteString("Avg");
5238  json.WriteNumber(stat.unusedRangeSizeAvg);
5239  json.WriteString("Max");
5240  json.WriteNumber(stat.unusedRangeSizeMax);
5241  json.EndObject();
5242  }
5243 
5244  json.EndObject();
5245 }
5246 
5247 #endif // #if VMA_STATS_STRING_ENABLED
5248 
5249 struct VmaSuballocationItemSizeLess
5250 {
5251  bool operator()(
5252  const VmaSuballocationList::iterator lhs,
5253  const VmaSuballocationList::iterator rhs) const
5254  {
5255  return lhs->size < rhs->size;
5256  }
5257  bool operator()(
5258  const VmaSuballocationList::iterator lhs,
5259  VkDeviceSize rhsSize) const
5260  {
5261  return lhs->size < rhsSize;
5262  }
5263 };
5264 
5266 // class VmaBlockMetadata
5267 
5268 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
5269  m_Size(0),
5270  m_FreeCount(0),
5271  m_SumFreeSize(0),
5272  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
5273  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
5274 {
5275 }
5276 
5277 VmaBlockMetadata::~VmaBlockMetadata()
5278 {
5279 }
5280 
5281 void VmaBlockMetadata::Init(VkDeviceSize size)
5282 {
5283  m_Size = size;
5284  m_FreeCount = 1;
5285  m_SumFreeSize = size;
5286 
5287  VmaSuballocation suballoc = {};
5288  suballoc.offset = 0;
5289  suballoc.size = size;
5290  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5291  suballoc.hAllocation = VK_NULL_HANDLE;
5292 
5293  m_Suballocations.push_back(suballoc);
5294  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
5295  --suballocItem;
5296  m_FreeSuballocationsBySize.push_back(suballocItem);
5297 }
5298 
5299 bool VmaBlockMetadata::Validate() const
5300 {
5301  if(m_Suballocations.empty())
5302  {
5303  return false;
5304  }
5305 
5306  // Expected offset of a new suballocation, as calculated from the previous ones.
5307  VkDeviceSize calculatedOffset = 0;
5308  // Expected number of free suballocations as calculated from traversing their list.
5309  uint32_t calculatedFreeCount = 0;
5310  // Expected sum size of free suballocations as calculated from traversing their list.
5311  VkDeviceSize calculatedSumFreeSize = 0;
5312  // Expected number of free suballocations that should be registered in
5313  // m_FreeSuballocationsBySize calculated from traversing their list.
5314  size_t freeSuballocationsToRegister = 0;
5315  // True if the previously visited suballocation was free.
5316  bool prevFree = false;
5317 
5318  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
5319  suballocItem != m_Suballocations.cend();
5320  ++suballocItem)
5321  {
5322  const VmaSuballocation& subAlloc = *suballocItem;
5323 
5324  // Actual offset of this suballocation doesn't match expected one.
5325  if(subAlloc.offset != calculatedOffset)
5326  {
5327  return false;
5328  }
5329 
5330  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
5331  // Two adjacent free suballocations are invalid. They should be merged.
5332  if(prevFree && currFree)
5333  {
5334  return false;
5335  }
5336 
5337  if(currFree != (subAlloc.hAllocation == VK_NULL_HANDLE))
5338  {
5339  return false;
5340  }
5341 
5342  if(currFree)
5343  {
5344  calculatedSumFreeSize += subAlloc.size;
5345  ++calculatedFreeCount;
5346  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5347  {
5348  ++freeSuballocationsToRegister;
5349  }
5350  }
5351  else
5352  {
5353  if(subAlloc.hAllocation->GetOffset() != subAlloc.offset)
5354  {
5355  return false;
5356  }
5357  if(subAlloc.hAllocation->GetSize() != subAlloc.size)
5358  {
5359  return false;
5360  }
5361  }
5362 
5363  calculatedOffset += subAlloc.size;
5364  prevFree = currFree;
5365  }
5366 
5367  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
5368  // match the expected count.
5369  if(m_FreeSuballocationsBySize.size() != freeSuballocationsToRegister)
5370  {
5371  return false;
5372  }
5373 
5374  VkDeviceSize lastSize = 0;
5375  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
5376  {
5377  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
5378 
5379  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
5380  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
5381  {
5382  return false;
5383  }
5384  // They must be sorted by size ascending.
5385  if(suballocItem->size < lastSize)
5386  {
5387  return false;
5388  }
5389 
5390  lastSize = suballocItem->size;
5391  }
5392 
5393  // Check if totals match the calculated values.
5394  if(!ValidateFreeSuballocationList() ||
5395  (calculatedOffset != m_Size) ||
5396  (calculatedSumFreeSize != m_SumFreeSize) ||
5397  (calculatedFreeCount != m_FreeCount))
5398  {
5399  return false;
5400  }
5401 
5402  return true;
5403 }
5404 
5405 VkDeviceSize VmaBlockMetadata::GetUnusedRangeSizeMax() const
5406 {
5407  if(!m_FreeSuballocationsBySize.empty())
5408  {
5409  return m_FreeSuballocationsBySize.back()->size;
5410  }
5411  else
5412  {
5413  return 0;
5414  }
5415 }
5416 
5417 bool VmaBlockMetadata::IsEmpty() const
5418 {
5419  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
5420 }
5421 
5422 void VmaBlockMetadata::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
5423 {
5424  outInfo.blockCount = 1;
5425 
5426  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
5427  outInfo.allocationCount = rangeCount - m_FreeCount;
5428  outInfo.unusedRangeCount = m_FreeCount;
5429 
5430  outInfo.unusedBytes = m_SumFreeSize;
5431  outInfo.usedBytes = m_Size - outInfo.unusedBytes;
5432 
5433  outInfo.allocationSizeMin = UINT64_MAX;
5434  outInfo.allocationSizeMax = 0;
5435  outInfo.unusedRangeSizeMin = UINT64_MAX;
5436  outInfo.unusedRangeSizeMax = 0;
5437 
5438  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
5439  suballocItem != m_Suballocations.cend();
5440  ++suballocItem)
5441  {
5442  const VmaSuballocation& suballoc = *suballocItem;
5443  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
5444  {
5445  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
5446  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
5447  }
5448  else
5449  {
5450  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
5451  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
5452  }
5453  }
5454 }
5455 
5456 void VmaBlockMetadata::AddPoolStats(VmaPoolStats& inoutStats) const
5457 {
5458  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
5459 
5460  inoutStats.size += m_Size;
5461  inoutStats.unusedSize += m_SumFreeSize;
5462  inoutStats.allocationCount += rangeCount - m_FreeCount;
5463  inoutStats.unusedRangeCount += m_FreeCount;
5464  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
5465 }
5466 
5467 #if VMA_STATS_STRING_ENABLED
5468 
5469 void VmaBlockMetadata::PrintDetailedMap(class VmaJsonWriter& json) const
5470 {
5471  json.BeginObject();
5472 
5473  json.WriteString("TotalBytes");
5474  json.WriteNumber(m_Size);
5475 
5476  json.WriteString("UnusedBytes");
5477  json.WriteNumber(m_SumFreeSize);
5478 
5479  json.WriteString("Allocations");
5480  json.WriteNumber((uint64_t)m_Suballocations.size() - m_FreeCount);
5481 
5482  json.WriteString("UnusedRanges");
5483  json.WriteNumber(m_FreeCount);
5484 
5485  json.WriteString("Suballocations");
5486  json.BeginArray();
5487  size_t i = 0;
5488  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
5489  suballocItem != m_Suballocations.cend();
5490  ++suballocItem, ++i)
5491  {
5492  json.BeginObject(true);
5493 
5494  json.WriteString("Type");
5495  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[suballocItem->type]);
5496 
5497  json.WriteString("Size");
5498  json.WriteNumber(suballocItem->size);
5499 
5500  json.WriteString("Offset");
5501  json.WriteNumber(suballocItem->offset);
5502 
5503  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
5504  {
5505  const void* pUserData = suballocItem->hAllocation->GetUserData();
5506  if(pUserData != VMA_NULL)
5507  {
5508  json.WriteString("UserData");
5509  if(suballocItem->hAllocation->IsUserDataString())
5510  {
5511  json.WriteString((const char*)pUserData);
5512  }
5513  else
5514  {
5515  json.BeginString();
5516  json.ContinueString_Pointer(pUserData);
5517  json.EndString();
5518  }
5519  }
5520  }
5521 
5522  json.EndObject();
5523  }
5524  json.EndArray();
5525 
5526  json.EndObject();
5527 }
5528 
5529 #endif // #if VMA_STATS_STRING_ENABLED
5530 
5531 /*
5532 How many suitable free suballocations to analyze before choosing the best one.
5533 - Set to 1 to use the First-Fit algorithm - the first suitable free suballocation will
5534  be chosen.
5535 - Set to UINT32_MAX to use the Best-Fit/Worst-Fit algorithm - all suitable free
5536  suballocations will be analyzed and the best one will be chosen.
5537 - Any other value is also acceptable.
5538 */
5539 //static const uint32_t MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK = 8;
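
// A sketch of the two strategies in standard-library terms, over a hypothetical
// ascending vector of free-range sizes (VmaBinaryFindFirstNotLess plays the role
// of std::lower_bound over m_FreeSuballocationsBySize when VMA_BEST_FIT is enabled;
// assumes <algorithm> and <vector>):
//
//   std::vector<VkDeviceSize> sizes = {256, 1024, 4096, 65536};
//   auto it = std::lower_bound(sizes.begin(), sizes.end(), allocSize);
//   // Best-Fit: *it is the smallest free range with size >= allocSize, or end() if none.
//   // The Worst-Fit branch of CreateAllocationRequest() instead scans from the biggest range downward.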
5540 
5541 void VmaBlockMetadata::CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest)
5542 {
5543  VMA_ASSERT(IsEmpty());
5544  pAllocationRequest->offset = 0;
5545  pAllocationRequest->sumFreeSize = m_SumFreeSize;
5546  pAllocationRequest->sumItemSize = 0;
5547  pAllocationRequest->item = m_Suballocations.begin();
5548  pAllocationRequest->itemsToMakeLostCount = 0;
5549 }
5550 
5551 bool VmaBlockMetadata::CreateAllocationRequest(
5552  uint32_t currentFrameIndex,
5553  uint32_t frameInUseCount,
5554  VkDeviceSize bufferImageGranularity,
5555  VkDeviceSize allocSize,
5556  VkDeviceSize allocAlignment,
5557  VmaSuballocationType allocType,
5558  bool canMakeOtherLost,
5559  VmaAllocationRequest* pAllocationRequest)
5560 {
5561  VMA_ASSERT(allocSize > 0);
5562  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
5563  VMA_ASSERT(pAllocationRequest != VMA_NULL);
5564  VMA_HEAVY_ASSERT(Validate());
5565 
5566  // There is not enough total free space in this block to fulfill the request: early return.
5567  if(canMakeOtherLost == false && m_SumFreeSize < allocSize)
5568  {
5569  return false;
5570  }
5571 
5572  // Efficient algorithm: search m_FreeSuballocationsBySize, which is sorted by size ascending.
5573  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
5574  if(freeSuballocCount > 0)
5575  {
5576  if(VMA_BEST_FIT)
5577  {
5578  // Find first free suballocation with size not less than allocSize.
5579  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
5580  m_FreeSuballocationsBySize.data(),
5581  m_FreeSuballocationsBySize.data() + freeSuballocCount,
5582  allocSize,
5583  VmaSuballocationItemSizeLess());
5584  size_t index = it - m_FreeSuballocationsBySize.data();
5585  for(; index < freeSuballocCount; ++index)
5586  {
5587  if(CheckAllocation(
5588  currentFrameIndex,
5589  frameInUseCount,
5590  bufferImageGranularity,
5591  allocSize,
5592  allocAlignment,
5593  allocType,
5594  m_FreeSuballocationsBySize[index],
5595  false, // canMakeOtherLost
5596  &pAllocationRequest->offset,
5597  &pAllocationRequest->itemsToMakeLostCount,
5598  &pAllocationRequest->sumFreeSize,
5599  &pAllocationRequest->sumItemSize))
5600  {
5601  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
5602  return true;
5603  }
5604  }
5605  }
5606  else
5607  {
5608  // Search starting from the biggest suballocations.
5609  for(size_t index = freeSuballocCount; index--; )
5610  {
5611  if(CheckAllocation(
5612  currentFrameIndex,
5613  frameInUseCount,
5614  bufferImageGranularity,
5615  allocSize,
5616  allocAlignment,
5617  allocType,
5618  m_FreeSuballocationsBySize[index],
5619  false, // canMakeOtherLost
5620  &pAllocationRequest->offset,
5621  &pAllocationRequest->itemsToMakeLostCount,
5622  &pAllocationRequest->sumFreeSize,
5623  &pAllocationRequest->sumItemSize))
5624  {
5625  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
5626  return true;
5627  }
5628  }
5629  }
5630  }
5631 
5632  if(canMakeOtherLost)
5633  {
5634  // Brute-force algorithm. TODO: Come up with something better.
5635 
5636  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
5637  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
5638 
5639  VmaAllocationRequest tmpAllocRequest = {};
5640  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
5641  suballocIt != m_Suballocations.end();
5642  ++suballocIt)
5643  {
5644  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
5645  suballocIt->hAllocation->CanBecomeLost())
5646  {
5647  if(CheckAllocation(
5648  currentFrameIndex,
5649  frameInUseCount,
5650  bufferImageGranularity,
5651  allocSize,
5652  allocAlignment,
5653  allocType,
5654  suballocIt,
5655  canMakeOtherLost,
5656  &tmpAllocRequest.offset,
5657  &tmpAllocRequest.itemsToMakeLostCount,
5658  &tmpAllocRequest.sumFreeSize,
5659  &tmpAllocRequest.sumItemSize))
5660  {
5661  tmpAllocRequest.item = suballocIt;
5662 
5663  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
5664  {
5665  *pAllocationRequest = tmpAllocRequest;
5666  }
5667  }
5668  }
5669  }
5670 
5671  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
5672  {
5673  return true;
5674  }
5675  }
5676 
5677  return false;
5678 }
5679 
5680 bool VmaBlockMetadata::MakeRequestedAllocationsLost(
5681  uint32_t currentFrameIndex,
5682  uint32_t frameInUseCount,
5683  VmaAllocationRequest* pAllocationRequest)
5684 {
5685  while(pAllocationRequest->itemsToMakeLostCount > 0)
5686  {
5687  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
5688  {
5689  ++pAllocationRequest->item;
5690  }
5691  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
5692  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
5693  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
5694  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
5695  {
5696  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
5697  --pAllocationRequest->itemsToMakeLostCount;
5698  }
5699  else
5700  {
5701  return false;
5702  }
5703  }
5704 
5705  VMA_HEAVY_ASSERT(Validate());
5706  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
5707  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
5708 
5709  return true;
5710 }
5711 
5712 uint32_t VmaBlockMetadata::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
5713 {
5714  uint32_t lostAllocationCount = 0;
5715  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
5716  it != m_Suballocations.end();
5717  ++it)
5718  {
5719  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
5720  it->hAllocation->CanBecomeLost() &&
5721  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
5722  {
5723  it = FreeSuballocation(it);
5724  ++lostAllocationCount;
5725  }
5726  }
5727  return lostAllocationCount;
5728 }
5729 
5730 void VmaBlockMetadata::Alloc(
5731  const VmaAllocationRequest& request,
5732  VmaSuballocationType type,
5733  VkDeviceSize allocSize,
5734  VmaAllocation hAllocation)
5735 {
5736  VMA_ASSERT(request.item != m_Suballocations.end());
5737  VmaSuballocation& suballoc = *request.item;
5738  // Given suballocation is a free block.
5739  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
5740  // Given offset is inside this suballocation.
5741  VMA_ASSERT(request.offset >= suballoc.offset);
5742  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
5743  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
5744  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
5745 
5746  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
5747  // it to become used.
5748  UnregisterFreeSuballocation(request.item);
5749 
5750  suballoc.offset = request.offset;
5751  suballoc.size = allocSize;
5752  suballoc.type = type;
5753  suballoc.hAllocation = hAllocation;
5754 
5755  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
5756  if(paddingEnd)
5757  {
5758  VmaSuballocation paddingSuballoc = {};
5759  paddingSuballoc.offset = request.offset + allocSize;
5760  paddingSuballoc.size = paddingEnd;
5761  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5762  VmaSuballocationList::iterator next = request.item;
5763  ++next;
5764  const VmaSuballocationList::iterator paddingEndItem =
5765  m_Suballocations.insert(next, paddingSuballoc);
5766  RegisterFreeSuballocation(paddingEndItem);
5767  }
5768 
5769  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
5770  if(paddingBegin)
5771  {
5772  VmaSuballocation paddingSuballoc = {};
5773  paddingSuballoc.offset = request.offset - paddingBegin;
5774  paddingSuballoc.size = paddingBegin;
5775  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5776  const VmaSuballocationList::iterator paddingBeginItem =
5777  m_Suballocations.insert(request.item, paddingSuballoc);
5778  RegisterFreeSuballocation(paddingBeginItem);
5779  }
5780 
5781  // Update totals.
5782  m_FreeCount = m_FreeCount - 1;
5783  if(paddingBegin > 0)
5784  {
5785  ++m_FreeCount;
5786  }
5787  if(paddingEnd > 0)
5788  {
5789  ++m_FreeCount;
5790  }
5791  m_SumFreeSize -= allocSize;
5792 }
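
// Alloc splits the chosen free suballocation into up to three parts:
//
//     |<- paddingBegin ->|<------- allocSize ------->|<- paddingEnd ->|
//     ^ suballoc.offset  ^ request.offset
//
// The middle part becomes the used suballocation; non-empty paddings are re-inserted
// as free suballocations and registered in m_FreeSuballocationsBySize, keeping
// m_FreeCount and m_SumFreeSize consistent with the list.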
5793 
5794 void VmaBlockMetadata::Free(const VmaAllocation allocation)
5795 {
5796  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
5797  suballocItem != m_Suballocations.end();
5798  ++suballocItem)
5799  {
5800  VmaSuballocation& suballoc = *suballocItem;
5801  if(suballoc.hAllocation == allocation)
5802  {
5803  FreeSuballocation(suballocItem);
5804  VMA_HEAVY_ASSERT(Validate());
5805  return;
5806  }
5807  }
5808  VMA_ASSERT(0 && "Not found!");
5809 }
5810 
5811 void VmaBlockMetadata::FreeAtOffset(VkDeviceSize offset)
5812 {
5813  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
5814  suballocItem != m_Suballocations.end();
5815  ++suballocItem)
5816  {
5817  VmaSuballocation& suballoc = *suballocItem;
5818  if(suballoc.offset == offset)
5819  {
5820  FreeSuballocation(suballocItem);
5821  return;
5822  }
5823  }
5824  VMA_ASSERT(0 && "Not found!");
5825 }
5826 
5827 bool VmaBlockMetadata::ValidateFreeSuballocationList() const
5828 {
5829  VkDeviceSize lastSize = 0;
5830  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
5831  {
5832  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
5833 
5834  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
5835  {
5836  VMA_ASSERT(0);
5837  return false;
5838  }
5839  if(it->size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5840  {
5841  VMA_ASSERT(0);
5842  return false;
5843  }
5844  if(it->size < lastSize)
5845  {
5846  VMA_ASSERT(0);
5847  return false;
5848  }
5849 
5850  lastSize = it->size;
5851  }
5852  return true;
5853 }
5854 
5855 bool VmaBlockMetadata::CheckAllocation(
5856  uint32_t currentFrameIndex,
5857  uint32_t frameInUseCount,
5858  VkDeviceSize bufferImageGranularity,
5859  VkDeviceSize allocSize,
5860  VkDeviceSize allocAlignment,
5861  VmaSuballocationType allocType,
5862  VmaSuballocationList::const_iterator suballocItem,
5863  bool canMakeOtherLost,
5864  VkDeviceSize* pOffset,
5865  size_t* itemsToMakeLostCount,
5866  VkDeviceSize* pSumFreeSize,
5867  VkDeviceSize* pSumItemSize) const
5868 {
5869  VMA_ASSERT(allocSize > 0);
5870  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
5871  VMA_ASSERT(suballocItem != m_Suballocations.cend());
5872  VMA_ASSERT(pOffset != VMA_NULL);
5873 
5874  *itemsToMakeLostCount = 0;
5875  *pSumFreeSize = 0;
5876  *pSumItemSize = 0;
5877 
5878  if(canMakeOtherLost)
5879  {
5880  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
5881  {
5882  *pSumFreeSize = suballocItem->size;
5883  }
5884  else
5885  {
5886  if(suballocItem->hAllocation->CanBecomeLost() &&
5887  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
5888  {
5889  ++*itemsToMakeLostCount;
5890  *pSumItemSize = suballocItem->size;
5891  }
5892  else
5893  {
5894  return false;
5895  }
5896  }
5897 
5898  // Remaining size is too small for this request: Early return.
5899  if(m_Size - suballocItem->offset < allocSize)
5900  {
5901  return false;
5902  }
5903 
5904  // Start from offset equal to beginning of this suballocation.
5905  *pOffset = suballocItem->offset;
5906 
5907  // Apply VMA_DEBUG_MARGIN at the beginning.
5908  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
5909  {
5910  *pOffset += VMA_DEBUG_MARGIN;
5911  }
5912 
5913  // Apply alignment.
5914  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
5915  *pOffset = VmaAlignUp(*pOffset, alignment);
5916 
5917  // Check previous suballocations for BufferImageGranularity conflicts.
5918  // Make bigger alignment if necessary.
5919  if(bufferImageGranularity > 1)
5920  {
5921  bool bufferImageGranularityConflict = false;
5922  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
5923  while(prevSuballocItem != m_Suballocations.cbegin())
5924  {
5925  --prevSuballocItem;
5926  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
5927  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
5928  {
5929  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
5930  {
5931  bufferImageGranularityConflict = true;
5932  break;
5933  }
5934  }
5935  else
5936  // Already on previous page.
5937  break;
5938  }
5939  if(bufferImageGranularityConflict)
5940  {
5941  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
5942  }
5943  }
5944 
5945  // Now that we have final *pOffset, check if we are past suballocItem.
5946  // If yes, return false - this function should be called for another suballocItem as starting point.
5947  if(*pOffset >= suballocItem->offset + suballocItem->size)
5948  {
5949  return false;
5950  }
5951 
5952  // Calculate padding at the beginning based on current offset.
5953  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
5954 
5955  // Calculate required margin at the end if this is not last suballocation.
5956  VmaSuballocationList::const_iterator next = suballocItem;
5957  ++next;
5958  const VkDeviceSize requiredEndMargin =
5959  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
5960 
5961  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
5962  // Another early return check.
5963  if(suballocItem->offset + totalSize > m_Size)
5964  {
5965  return false;
5966  }
5967 
5968  // Advance lastSuballocItem until desired size is reached.
5969  // Update itemsToMakeLostCount.
5970  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
5971  if(totalSize > suballocItem->size)
5972  {
5973  VkDeviceSize remainingSize = totalSize - suballocItem->size;
5974  while(remainingSize > 0)
5975  {
5976  ++lastSuballocItem;
5977  if(lastSuballocItem == m_Suballocations.cend())
5978  {
5979  return false;
5980  }
5981  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
5982  {
5983  *pSumFreeSize += lastSuballocItem->size;
5984  }
5985  else
5986  {
5987  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
5988  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
5989  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
5990  {
5991  ++*itemsToMakeLostCount;
5992  *pSumItemSize += lastSuballocItem->size;
5993  }
5994  else
5995  {
5996  return false;
5997  }
5998  }
5999  remainingSize = (lastSuballocItem->size < remainingSize) ?
6000  remainingSize - lastSuballocItem->size : 0;
6001  }
6002  }
6003 
6004  // Check next suballocations for BufferImageGranularity conflicts.
6005  // If conflict exists, we must mark more allocations lost or fail.
6006  if(bufferImageGranularity > 1)
6007  {
6008  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
6009  ++nextSuballocItem;
6010  while(nextSuballocItem != m_Suballocations.cend())
6011  {
6012  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
6013  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
6014  {
6015  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
6016  {
6017  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
6018  if(nextSuballoc.hAllocation->CanBecomeLost() &&
6019  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
6020  {
6021  ++*itemsToMakeLostCount;
6022  }
6023  else
6024  {
6025  return false;
6026  }
6027  }
6028  }
6029  else
6030  {
6031  // Already on next page.
6032  break;
6033  }
6034  ++nextSuballocItem;
6035  }
6036  }
6037  }
6038  else
6039  {
6040  const VmaSuballocation& suballoc = *suballocItem;
6041  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
6042 
6043  *pSumFreeSize = suballoc.size;
6044 
6045  // Size of this suballocation is too small for this request: Early return.
6046  if(suballoc.size < allocSize)
6047  {
6048  return false;
6049  }
6050 
6051  // Start from offset equal to beginning of this suballocation.
6052  *pOffset = suballoc.offset;
6053 
6054  // Apply VMA_DEBUG_MARGIN at the beginning.
6055  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
6056  {
6057  *pOffset += VMA_DEBUG_MARGIN;
6058  }
6059 
6060  // Apply alignment.
6061  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
6062  *pOffset = VmaAlignUp(*pOffset, alignment);
6063 
6064  // Check previous suballocations for BufferImageGranularity conflicts.
6065  // Make bigger alignment if necessary.
6066  if(bufferImageGranularity > 1)
6067  {
6068  bool bufferImageGranularityConflict = false;
6069  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
6070  while(prevSuballocItem != m_Suballocations.cbegin())
6071  {
6072  --prevSuballocItem;
6073  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
6074  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
6075  {
6076  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
6077  {
6078  bufferImageGranularityConflict = true;
6079  break;
6080  }
6081  }
6082  else
6083  // Already on previous page.
6084  break;
6085  }
6086  if(bufferImageGranularityConflict)
6087  {
6088  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
6089  }
6090  }
6091 
6092  // Calculate padding at the beginning based on current offset.
6093  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
6094 
6095  // Calculate required margin at the end if this is not last suballocation.
6096  VmaSuballocationList::const_iterator next = suballocItem;
6097  ++next;
6098  const VkDeviceSize requiredEndMargin =
6099  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
6100 
6101  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
6102  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
6103  {
6104  return false;
6105  }
6106 
6107  // Check next suballocations for BufferImageGranularity conflicts.
6108  // If conflict exists, allocation cannot be made here.
6109  if(bufferImageGranularity > 1)
6110  {
6111  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
6112  ++nextSuballocItem;
6113  while(nextSuballocItem != m_Suballocations.cend())
6114  {
6115  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
6116  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
6117  {
6118  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
6119  {
6120  return false;
6121  }
6122  }
6123  else
6124  {
6125  // Already on next page.
6126  break;
6127  }
6128  ++nextSuballocItem;
6129  }
6130  }
6131  }
6132 
6133  // All tests passed: Success. pOffset is already filled.
6134  return true;
6135 }
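
// About bufferImageGranularity: Vulkan requires that linear resources (buffers, linear
// images) and optimally tiled images bound to the same VkDeviceMemory never share a
// "page" of VkPhysicalDeviceLimits::bufferImageGranularity bytes. CheckAllocation
// therefore bumps *pOffset to the next granularity boundary when the previous
// suballocation on the same page has a conflicting type, and rejects the spot (or marks
// more items to be made lost) when a following suballocation would conflict. For
// example, with a granularity of 1024, a buffer ending at offset 600 cannot be followed
// by an optimal image starting at offset 768, because both fall into the page [0, 1024).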
6136 
6137 void VmaBlockMetadata::MergeFreeWithNext(VmaSuballocationList::iterator item)
6138 {
6139  VMA_ASSERT(item != m_Suballocations.end());
6140  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
6141 
6142  VmaSuballocationList::iterator nextItem = item;
6143  ++nextItem;
6144  VMA_ASSERT(nextItem != m_Suballocations.end());
6145  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
6146 
6147  item->size += nextItem->size;
6148  --m_FreeCount;
6149  m_Suballocations.erase(nextItem);
6150 }
6151 
6152 VmaSuballocationList::iterator VmaBlockMetadata::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
6153 {
6154  // Change this suballocation to be marked as free.
6155  VmaSuballocation& suballoc = *suballocItem;
6156  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
6157  suballoc.hAllocation = VK_NULL_HANDLE;
6158 
6159  // Update totals.
6160  ++m_FreeCount;
6161  m_SumFreeSize += suballoc.size;
6162 
6163  // Merge with previous and/or next suballocation if it's also free.
6164  bool mergeWithNext = false;
6165  bool mergeWithPrev = false;
6166 
6167  VmaSuballocationList::iterator nextItem = suballocItem;
6168  ++nextItem;
6169  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
6170  {
6171  mergeWithNext = true;
6172  }
6173 
6174  VmaSuballocationList::iterator prevItem = suballocItem;
6175  if(suballocItem != m_Suballocations.begin())
6176  {
6177  --prevItem;
6178  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
6179  {
6180  mergeWithPrev = true;
6181  }
6182  }
6183 
6184  if(mergeWithNext)
6185  {
6186  UnregisterFreeSuballocation(nextItem);
6187  MergeFreeWithNext(suballocItem);
6188  }
6189 
6190  if(mergeWithPrev)
6191  {
6192  UnregisterFreeSuballocation(prevItem);
6193  MergeFreeWithNext(prevItem);
6194  RegisterFreeSuballocation(prevItem);
6195  return prevItem;
6196  }
6197  else
6198  {
6199  RegisterFreeSuballocation(suballocItem);
6200  return suballocItem;
6201  }
6202 }
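
// Ordering matters above: a free neighbor must be unregistered from
// m_FreeSuballocationsBySize *before* MergeFreeWithNext changes its size, because that
// vector is sorted by size and a resized entry could no longer be located by the binary
// search in UnregisterFreeSuballocation (below). The merged suballocation is then
// registered exactly once, with its final size.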
6203 
6204 void VmaBlockMetadata::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
6205 {
6206  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
6207  VMA_ASSERT(item->size > 0);
6208 
6209  // You may want to enable this validation at the beginning or at the end of
6210  // this function, depending on what you want to check.
6211  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
6212 
6213  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6214  {
6215  if(m_FreeSuballocationsBySize.empty())
6216  {
6217  m_FreeSuballocationsBySize.push_back(item);
6218  }
6219  else
6220  {
6221  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
6222  }
6223  }
6224 
6225  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
6226 }
6227 
6228 
6229 void VmaBlockMetadata::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
6230 {
6231  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
6232  VMA_ASSERT(item->size > 0);
6233 
6234  // You may want to enable this validation at the beginning or at the end of
6235  // this function, depending on what you want to check.
6236  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
6237 
6238  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6239  {
6240  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
6241  m_FreeSuballocationsBySize.data(),
6242  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
6243  item,
6244  VmaSuballocationItemSizeLess());
6245  for(size_t index = it - m_FreeSuballocationsBySize.data();
6246  index < m_FreeSuballocationsBySize.size();
6247  ++index)
6248  {
6249  if(m_FreeSuballocationsBySize[index] == item)
6250  {
6251  VmaVectorRemove(m_FreeSuballocationsBySize, index);
6252  return;
6253  }
6254  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
6255  }
6256  VMA_ASSERT(0 && "Not found.");
6257  }
6258 
6259  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
6260 }
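
// VmaBinaryFindFirstNotLess only locates the first entry whose size is not less than
// item->size. Because many free suballocations may share the same size, the loop above
// then walks forward through that run of equal-sized entries, comparing iterators,
// until it hits the exact item. The in-loop assert fires if the scan leaves the
// equal-size run, which would mean the item was never registered.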
6261 
6262 ////////////////////////////////////////////////////////////////////////////////
6263 // class VmaDeviceMemoryBlock
6264 
6265 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
6266  m_Metadata(hAllocator),
6267  m_MemoryTypeIndex(UINT32_MAX),
6268  m_hMemory(VK_NULL_HANDLE),
6269  m_MapCount(0),
6270  m_pMappedData(VMA_NULL)
6271 {
6272 }
6273 
6274 void VmaDeviceMemoryBlock::Init(
6275  uint32_t newMemoryTypeIndex,
6276  VkDeviceMemory newMemory,
6277  VkDeviceSize newSize)
6278 {
6279  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
6280 
6281  m_MemoryTypeIndex = newMemoryTypeIndex;
6282  m_hMemory = newMemory;
6283 
6284  m_Metadata.Init(newSize);
6285 }
6286 
6287 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
6288 {
6289  // This is the most important assert in the entire library.
6290  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
6291  VMA_ASSERT(m_Metadata.IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
6292 
6293  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
6294  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_Metadata.GetSize(), m_hMemory);
6295  m_hMemory = VK_NULL_HANDLE;
6296 }
6297 
6298 bool VmaDeviceMemoryBlock::Validate() const
6299 {
6300  if((m_hMemory == VK_NULL_HANDLE) ||
6301  (m_Metadata.GetSize() == 0))
6302  {
6303  return false;
6304  }
6305 
6306  return m_Metadata.Validate();
6307 }
6308 
6309 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
6310 {
6311  if(count == 0)
6312  {
6313  return VK_SUCCESS;
6314  }
6315 
6316  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
6317  if(m_MapCount != 0)
6318  {
6319  m_MapCount += count;
6320  VMA_ASSERT(m_pMappedData != VMA_NULL);
6321  if(ppData != VMA_NULL)
6322  {
6323  *ppData = m_pMappedData;
6324  }
6325  return VK_SUCCESS;
6326  }
6327  else
6328  {
6329  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
6330  hAllocator->m_hDevice,
6331  m_hMemory,
6332  0, // offset
6333  VK_WHOLE_SIZE,
6334  0, // flags
6335  &m_pMappedData);
6336  if(result == VK_SUCCESS)
6337  {
6338  if(ppData != VMA_NULL)
6339  {
6340  *ppData = m_pMappedData;
6341  }
6342  m_MapCount = count;
6343  }
6344  return result;
6345  }
6346 }
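
// Map/Unmap are reference-counted per block, so persistently mapped allocations
// (VMA_ALLOCATION_CREATE_MAPPED_BIT) and temporary vmaMapMemory calls can coexist:
// vkMapMemory is issued only on the 0 -> 1 transition and vkUnmapMemory on 1 -> 0.
//
// A minimal sketch (hypothetical handles, assuming the calls succeed):
//
//     void* pData = VMA_NULL;
//     VkResult res = pBlock->Map(hAllocator, 1, &pData); // first user: vkMapMemory
//     res = pBlock->Map(hAllocator, 1, VMA_NULL);        // second user: count only
//     pBlock->Unmap(hAllocator, 1);                      // still mapped
//     pBlock->Unmap(hAllocator, 1);                      // last user: vkUnmapMemory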
6347 
6348 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
6349 {
6350  if(count == 0)
6351  {
6352  return;
6353  }
6354 
6355  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
6356  if(m_MapCount >= count)
6357  {
6358  m_MapCount -= count;
6359  if(m_MapCount == 0)
6360  {
6361  m_pMappedData = VMA_NULL;
6362  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
6363  }
6364  }
6365  else
6366  {
6367  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
6368  }
6369 }
6370 
6371 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
6372  const VmaAllocator hAllocator,
6373  const VmaAllocation hAllocation,
6374  VkBuffer hBuffer)
6375 {
6376  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
6377  hAllocation->GetBlock() == this);
6378  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
6379  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
6380  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
6381  hAllocator->m_hDevice,
6382  hBuffer,
6383  m_hMemory,
6384  hAllocation->GetOffset());
6385 }
6386 
6387 VkResult VmaDeviceMemoryBlock::BindImageMemory(
6388  const VmaAllocator hAllocator,
6389  const VmaAllocation hAllocation,
6390  VkImage hImage)
6391 {
6392  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
6393  hAllocation->GetBlock() == this);
6394  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
6395  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
6396  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
6397  hAllocator->m_hDevice,
6398  hImage,
6399  m_hMemory,
6400  hAllocation->GetOffset());
6401 }
6402 
6403 static void InitStatInfo(VmaStatInfo& outInfo)
6404 {
6405  memset(&outInfo, 0, sizeof(outInfo));
6406  outInfo.allocationSizeMin = UINT64_MAX;
6407  outInfo.unusedRangeSizeMin = UINT64_MAX;
6408 }
6409 
6410 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
6411 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
6412 {
6413  inoutInfo.blockCount += srcInfo.blockCount;
6414  inoutInfo.allocationCount += srcInfo.allocationCount;
6415  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
6416  inoutInfo.usedBytes += srcInfo.usedBytes;
6417  inoutInfo.unusedBytes += srcInfo.unusedBytes;
6418  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
6419  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
6420  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
6421  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
6422 }
6423 
6424 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
6425 {
6426  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
6427  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
6428  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
6429  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
6430 }
6431 
6432 VmaPool_T::VmaPool_T(
6433  VmaAllocator hAllocator,
6434  const VmaPoolCreateInfo& createInfo) :
6435  m_BlockVector(
6436  hAllocator,
6437  createInfo.memoryTypeIndex,
6438  createInfo.blockSize,
6439  createInfo.minBlockCount,
6440  createInfo.maxBlockCount,
6441  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
6442  createInfo.frameInUseCount,
6443  true) // isCustomPool
6444 {
6445 }
6446 
6447 VmaPool_T::~VmaPool_T()
6448 {
6449 }
6450 
6451 #if VMA_STATS_STRING_ENABLED
6452 
6453 #endif // #if VMA_STATS_STRING_ENABLED
6454 
6455 VmaBlockVector::VmaBlockVector(
6456  VmaAllocator hAllocator,
6457  uint32_t memoryTypeIndex,
6458  VkDeviceSize preferredBlockSize,
6459  size_t minBlockCount,
6460  size_t maxBlockCount,
6461  VkDeviceSize bufferImageGranularity,
6462  uint32_t frameInUseCount,
6463  bool isCustomPool) :
6464  m_hAllocator(hAllocator),
6465  m_MemoryTypeIndex(memoryTypeIndex),
6466  m_PreferredBlockSize(preferredBlockSize),
6467  m_MinBlockCount(minBlockCount),
6468  m_MaxBlockCount(maxBlockCount),
6469  m_BufferImageGranularity(bufferImageGranularity),
6470  m_FrameInUseCount(frameInUseCount),
6471  m_IsCustomPool(isCustomPool),
6472  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
6473  m_HasEmptyBlock(false),
6474  m_pDefragmentator(VMA_NULL)
6475 {
6476 }
6477 
6478 VmaBlockVector::~VmaBlockVector()
6479 {
6480  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
6481 
6482  for(size_t i = m_Blocks.size(); i--; )
6483  {
6484  m_Blocks[i]->Destroy(m_hAllocator);
6485  vma_delete(m_hAllocator, m_Blocks[i]);
6486  }
6487 }
6488 
6489 VkResult VmaBlockVector::CreateMinBlocks()
6490 {
6491  for(size_t i = 0; i < m_MinBlockCount; ++i)
6492  {
6493  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
6494  if(res != VK_SUCCESS)
6495  {
6496  return res;
6497  }
6498  }
6499  return VK_SUCCESS;
6500 }
6501 
6502 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
6503 {
6504  pStats->size = 0;
6505  pStats->unusedSize = 0;
6506  pStats->allocationCount = 0;
6507  pStats->unusedRangeCount = 0;
6508  pStats->unusedRangeSizeMax = 0;
6509 
6510  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6511 
6512  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6513  {
6514  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
6515  VMA_ASSERT(pBlock);
6516  VMA_HEAVY_ASSERT(pBlock->Validate());
6517  pBlock->m_Metadata.AddPoolStats(*pStats);
6518  }
6519 }
6520 
6521 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
6522 
6523 VkResult VmaBlockVector::Allocate(
6524  VmaPool hCurrentPool,
6525  uint32_t currentFrameIndex,
6526  const VkMemoryRequirements& vkMemReq,
6527  const VmaAllocationCreateInfo& createInfo,
6528  VmaSuballocationType suballocType,
6529  VmaAllocation* pAllocation)
6530 {
6531  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
6532  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
6533 
6534  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6535 
6536  // 1. Search existing allocations. Try to allocate without making other allocations lost.
6537  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
6538  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
6539  {
6540  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
6541  VMA_ASSERT(pCurrBlock);
6542  VmaAllocationRequest currRequest = {};
6543  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
6544  currentFrameIndex,
6545  m_FrameInUseCount,
6546  m_BufferImageGranularity,
6547  vkMemReq.size,
6548  vkMemReq.alignment,
6549  suballocType,
6550  false, // canMakeOtherLost
6551  &currRequest))
6552  {
6553  // Allocate from pCurrBlock.
6554  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
6555 
6556  if(mapped)
6557  {
6558  VkResult res = pCurrBlock->Map(m_hAllocator, 1, VMA_NULL);
6559  if(res != VK_SUCCESS)
6560  {
6561  return res;
6562  }
6563  }
6564 
6565  // We no longer have an empty block.
6566  if(pCurrBlock->m_Metadata.IsEmpty())
6567  {
6568  m_HasEmptyBlock = false;
6569  }
6570 
6571  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
6572  pCurrBlock->m_Metadata.Alloc(currRequest, suballocType, vkMemReq.size, *pAllocation);
6573  (*pAllocation)->InitBlockAllocation(
6574  hCurrentPool,
6575  pCurrBlock,
6576  currRequest.offset,
6577  vkMemReq.alignment,
6578  vkMemReq.size,
6579  suballocType,
6580  mapped,
6581  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
6582  VMA_HEAVY_ASSERT(pCurrBlock->Validate());
6583  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
6584  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
6585  return VK_SUCCESS;
6586  }
6587  }
6588 
6589  const bool canCreateNewBlock =
6590  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
6591  (m_Blocks.size() < m_MaxBlockCount);
6592 
6593  // 2. Try to create new block.
6594  if(canCreateNewBlock)
6595  {
6596  // Calculate optimal size for new block.
6597  VkDeviceSize newBlockSize = m_PreferredBlockSize;
6598  uint32_t newBlockSizeShift = 0;
6599  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
6600 
6601  // Allocating blocks of other sizes is allowed only in default pools.
6602  // In custom pools block size is fixed.
6603  if(m_IsCustomPool == false)
6604  {
6605  // Allocate 1/8, 1/4, 1/2 as first blocks.
6606  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
6607  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
6608  {
6609  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
6610  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= vkMemReq.size * 2)
6611  {
6612  newBlockSize = smallerNewBlockSize;
6613  ++newBlockSizeShift;
6614  }
6615  else
6616  {
6617  break;
6618  }
6619  }
6620  }
6621 
6622  size_t newBlockIndex = 0;
6623  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
6624  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
6625  if(m_IsCustomPool == false)
6626  {
6627  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
6628  {
6629  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
6630  if(smallerNewBlockSize >= vkMemReq.size)
6631  {
6632  newBlockSize = smallerNewBlockSize;
6633  ++newBlockSizeShift;
6634  res = CreateBlock(newBlockSize, &newBlockIndex);
6635  }
6636  else
6637  {
6638  break;
6639  }
6640  }
6641  }
6642 
6643  if(res == VK_SUCCESS)
6644  {
6645  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
6646  VMA_ASSERT(pBlock->m_Metadata.GetSize() >= vkMemReq.size);
6647 
6648  if(mapped)
6649  {
6650  res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
6651  if(res != VK_SUCCESS)
6652  {
6653  return res;
6654  }
6655  }
6656 
6657  // Allocate from pBlock. Because it is empty, allocRequest can be trivially filled.
6658  VmaAllocationRequest allocRequest;
6659  pBlock->m_Metadata.CreateFirstAllocationRequest(&allocRequest);
6660  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
6661  pBlock->m_Metadata.Alloc(allocRequest, suballocType, vkMemReq.size, *pAllocation);
6662  (*pAllocation)->InitBlockAllocation(
6663  hCurrentPool,
6664  pBlock,
6665  allocRequest.offset,
6666  vkMemReq.alignment,
6667  vkMemReq.size,
6668  suballocType,
6669  mapped,
6670  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
6671  VMA_HEAVY_ASSERT(pBlock->Validate());
6672  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
6673  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
6674  return VK_SUCCESS;
6675  }
6676  }
6677 
6678  const bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
6679 
6680  // 3. Try to allocate from existing blocks with making other allocations lost.
6681  if(canMakeOtherLost)
6682  {
6683  uint32_t tryIndex = 0;
6684  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
6685  {
6686  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
6687  VmaAllocationRequest bestRequest = {};
6688  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
6689 
6690  // 1. Search existing allocations.
6691  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
6692  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
6693  {
6694  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
6695  VMA_ASSERT(pCurrBlock);
6696  VmaAllocationRequest currRequest = {};
6697  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
6698  currentFrameIndex,
6699  m_FrameInUseCount,
6700  m_BufferImageGranularity,
6701  vkMemReq.size,
6702  vkMemReq.alignment,
6703  suballocType,
6704  canMakeOtherLost,
6705  &currRequest))
6706  {
6707  const VkDeviceSize currRequestCost = currRequest.CalcCost();
6708  if(pBestRequestBlock == VMA_NULL ||
6709  currRequestCost < bestRequestCost)
6710  {
6711  pBestRequestBlock = pCurrBlock;
6712  bestRequest = currRequest;
6713  bestRequestCost = currRequestCost;
6714 
6715  if(bestRequestCost == 0)
6716  {
6717  break;
6718  }
6719  }
6720  }
6721  }
6722 
6723  if(pBestRequestBlock != VMA_NULL)
6724  {
6725  if(mapped)
6726  {
6727  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
6728  if(res != VK_SUCCESS)
6729  {
6730  return res;
6731  }
6732  }
6733 
6734  if(pBestRequestBlock->m_Metadata.MakeRequestedAllocationsLost(
6735  currentFrameIndex,
6736  m_FrameInUseCount,
6737  &bestRequest))
6738  {
6739  // We no longer have an empty block.
6740  if(pBestRequestBlock->m_Metadata.IsEmpty())
6741  {
6742  m_HasEmptyBlock = false;
6743  }
6744  // Allocate from pBestRequestBlock.
6745  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
6746  pBestRequestBlock->m_Metadata.Alloc(bestRequest, suballocType, vkMemReq.size, *pAllocation);
6747  (*pAllocation)->InitBlockAllocation(
6748  hCurrentPool,
6749  pBestRequestBlock,
6750  bestRequest.offset,
6751  vkMemReq.alignment,
6752  vkMemReq.size,
6753  suballocType,
6754  mapped,
6755  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
6756  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
6757  VMA_DEBUG_LOG(" Returned from existing block");
6758  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
6759  return VK_SUCCESS;
6760  }
6761  // else: Some allocations must have been touched while we are here. Next try.
6762  }
6763  else
6764  {
6765  // Could not find place in any of the blocks - break outer loop.
6766  break;
6767  }
6768  }
6769  /* Maximum number of tries exceeded - a very unlikely event that may happen when
6770  many other threads are simultaneously touching the allocations, making it
6771  impossible to mark them lost at the same time as we try to allocate. */
6772  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
6773  {
6774  return VK_ERROR_TOO_MANY_OBJECTS;
6775  }
6776  }
6777 
6778  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6779 }
6780 
6781 void VmaBlockVector::Free(
6782  VmaAllocation hAllocation)
6783 {
6784  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
6785 
6786  // Scope for lock.
6787  {
6788  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6789 
6790  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
6791 
6792  if(hAllocation->IsPersistentMap())
6793  {
6794  pBlock->Unmap(m_hAllocator, 1);
6795  }
6796 
6797  pBlock->m_Metadata.Free(hAllocation);
6798  VMA_HEAVY_ASSERT(pBlock->Validate());
6799 
6800  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
6801 
6802  // pBlock became empty after this deallocation.
6803  if(pBlock->m_Metadata.IsEmpty())
6804  {
6805  // We already have an empty block. We don't want two, so delete this one.
6806  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
6807  {
6808  pBlockToDelete = pBlock;
6809  Remove(pBlock);
6810  }
6811  // We now have our first empty block.
6812  else
6813  {
6814  m_HasEmptyBlock = true;
6815  }
6816  }
6817  // pBlock didn't become empty, but we have another empty block - find and free that one.
6818  // (This is an optional heuristic.)
6819  else if(m_HasEmptyBlock)
6820  {
6821  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
6822  if(pLastBlock->m_Metadata.IsEmpty() && m_Blocks.size() > m_MinBlockCount)
6823  {
6824  pBlockToDelete = pLastBlock;
6825  m_Blocks.pop_back();
6826  m_HasEmptyBlock = false;
6827  }
6828  }
6829 
6830  IncrementallySortBlocks();
6831  }
6832 
6833  // Destruction of a free block. Deferred until this point, outside of the mutex
6834  // lock, for performance reasons.
6835  if(pBlockToDelete != VMA_NULL)
6836  {
6837  VMA_DEBUG_LOG(" Deleted empty block");
6838  pBlockToDelete->Destroy(m_hAllocator);
6839  vma_delete(m_hAllocator, pBlockToDelete);
6840  }
6841 }
6842 
6843 size_t VmaBlockVector::CalcMaxBlockSize() const
6844 {
6845  size_t result = 0;
6846  for(size_t i = m_Blocks.size(); i--; )
6847  {
6848  result = VMA_MAX((uint64_t)result, (uint64_t)m_Blocks[i]->m_Metadata.GetSize());
6849  if(result >= m_PreferredBlockSize)
6850  {
6851  break;
6852  }
6853  }
6854  return result;
6855 }
6856 
6857 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
6858 {
6859  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6860  {
6861  if(m_Blocks[blockIndex] == pBlock)
6862  {
6863  VmaVectorRemove(m_Blocks, blockIndex);
6864  return;
6865  }
6866  }
6867  VMA_ASSERT(0);
6868 }
6869 
6870 void VmaBlockVector::IncrementallySortBlocks()
6871 {
6872  // Bubble sort only until first swap.
6873  for(size_t i = 1; i < m_Blocks.size(); ++i)
6874  {
6875  if(m_Blocks[i - 1]->m_Metadata.GetSumFreeSize() > m_Blocks[i]->m_Metadata.GetSumFreeSize())
6876  {
6877  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
6878  return;
6879  }
6880  }
6881 }
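
// IncrementallySortBlocks performs at most one swap per call, amortizing the cost of
// keeping m_Blocks ordered by ascending sum of free space across many Free() calls.
// Exact order is not required for correctness: Allocate() scans blocks front to back,
// so an approximately ascending order merely biases new allocations toward the fullest
// blocks, which helps emptied blocks stay empty long enough to be released.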
6882 
6883 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
6884 {
6885  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
6886  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
6887  allocInfo.allocationSize = blockSize;
6888  VkDeviceMemory mem = VK_NULL_HANDLE;
6889  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
6890  if(res < 0)
6891  {
6892  return res;
6893  }
6894 
6895  // New VkDeviceMemory successfully created.
6896 
6897  // Create a new block object for it.
6898  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
6899  pBlock->Init(
6900  m_MemoryTypeIndex,
6901  mem,
6902  allocInfo.allocationSize);
6903 
6904  m_Blocks.push_back(pBlock);
6905  if(pNewBlockIndex != VMA_NULL)
6906  {
6907  *pNewBlockIndex = m_Blocks.size() - 1;
6908  }
6909 
6910  return VK_SUCCESS;
6911 }
6912 
6913 #if VMA_STATS_STRING_ENABLED
6914 
6915 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
6916 {
6917  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6918 
6919  json.BeginObject();
6920 
6921  if(m_IsCustomPool)
6922  {
6923  json.WriteString("MemoryTypeIndex");
6924  json.WriteNumber(m_MemoryTypeIndex);
6925 
6926  json.WriteString("BlockSize");
6927  json.WriteNumber(m_PreferredBlockSize);
6928 
6929  json.WriteString("BlockCount");
6930  json.BeginObject(true);
6931  if(m_MinBlockCount > 0)
6932  {
6933  json.WriteString("Min");
6934  json.WriteNumber((uint64_t)m_MinBlockCount);
6935  }
6936  if(m_MaxBlockCount < SIZE_MAX)
6937  {
6938  json.WriteString("Max");
6939  json.WriteNumber((uint64_t)m_MaxBlockCount);
6940  }
6941  json.WriteString("Cur");
6942  json.WriteNumber((uint64_t)m_Blocks.size());
6943  json.EndObject();
6944 
6945  if(m_FrameInUseCount > 0)
6946  {
6947  json.WriteString("FrameInUseCount");
6948  json.WriteNumber(m_FrameInUseCount);
6949  }
6950  }
6951  else
6952  {
6953  json.WriteString("PreferredBlockSize");
6954  json.WriteNumber(m_PreferredBlockSize);
6955  }
6956 
6957  json.WriteString("Blocks");
6958  json.BeginArray();
6959  for(size_t i = 0; i < m_Blocks.size(); ++i)
6960  {
6961  m_Blocks[i]->m_Metadata.PrintDetailedMap(json);
6962  }
6963  json.EndArray();
6964 
6965  json.EndObject();
6966 }
6967 
6968 #endif // #if VMA_STATS_STRING_ENABLED
6969 
6970 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
6971  VmaAllocator hAllocator,
6972  uint32_t currentFrameIndex)
6973 {
6974  if(m_pDefragmentator == VMA_NULL)
6975  {
6976  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
6977  hAllocator,
6978  this,
6979  currentFrameIndex);
6980  }
6981 
6982  return m_pDefragmentator;
6983 }
6984 
6985 VkResult VmaBlockVector::Defragment(
6986  VmaDefragmentationStats* pDefragmentationStats,
6987  VkDeviceSize& maxBytesToMove,
6988  uint32_t& maxAllocationsToMove)
6989 {
6990  if(m_pDefragmentator == VMA_NULL)
6991  {
6992  return VK_SUCCESS;
6993  }
6994 
6995  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6996 
6997  // Defragment.
6998  VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);
6999 
7000  // Accumulate statistics.
7001  if(pDefragmentationStats != VMA_NULL)
7002  {
7003  const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
7004  const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
7005  pDefragmentationStats->bytesMoved += bytesMoved;
7006  pDefragmentationStats->allocationsMoved += allocationsMoved;
7007  VMA_ASSERT(bytesMoved <= maxBytesToMove);
7008  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
7009  maxBytesToMove -= bytesMoved;
7010  maxAllocationsToMove -= allocationsMoved;
7011  }
7012 
7013  // Free empty blocks.
7014  m_HasEmptyBlock = false;
7015  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
7016  {
7017  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
7018  if(pBlock->m_Metadata.IsEmpty())
7019  {
7020  if(m_Blocks.size() > m_MinBlockCount)
7021  {
7022  if(pDefragmentationStats != VMA_NULL)
7023  {
7024  ++pDefragmentationStats->deviceMemoryBlocksFreed;
7025  pDefragmentationStats->bytesFreed += pBlock->m_Metadata.GetSize();
7026  }
7027 
7028  VmaVectorRemove(m_Blocks, blockIndex);
7029  pBlock->Destroy(m_hAllocator);
7030  vma_delete(m_hAllocator, pBlock);
7031  }
7032  else
7033  {
7034  m_HasEmptyBlock = true;
7035  }
7036  }
7037  }
7038 
7039  return result;
7040 }
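
// Note that maxBytesToMove and maxAllocationsToMove are in/out parameters: they are
// decremented by however much this block vector consumed, so a caller iterating over
// multiple block vectors can spread one global defragmentation budget across them.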
7041 
7042 void VmaBlockVector::DestroyDefragmentator()
7043 {
7044  if(m_pDefragmentator != VMA_NULL)
7045  {
7046  vma_delete(m_hAllocator, m_pDefragmentator);
7047  m_pDefragmentator = VMA_NULL;
7048  }
7049 }
7050 
7051 void VmaBlockVector::MakePoolAllocationsLost(
7052  uint32_t currentFrameIndex,
7053  size_t* pLostAllocationCount)
7054 {
7055  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
7056  size_t lostAllocationCount = 0;
7057  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
7058  {
7059  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
7060  VMA_ASSERT(pBlock);
7061  lostAllocationCount += pBlock->m_Metadata.MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
7062  }
7063  if(pLostAllocationCount != VMA_NULL)
7064  {
7065  *pLostAllocationCount = lostAllocationCount;
7066  }
7067 }
7068 
7069 void VmaBlockVector::AddStats(VmaStats* pStats)
7070 {
7071  const uint32_t memTypeIndex = m_MemoryTypeIndex;
7072  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
7073 
7074  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
7075 
7076  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
7077  {
7078  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
7079  VMA_ASSERT(pBlock);
7080  VMA_HEAVY_ASSERT(pBlock->Validate());
7081  VmaStatInfo allocationStatInfo;
7082  pBlock->m_Metadata.CalcAllocationStatInfo(allocationStatInfo);
7083  VmaAddStatInfo(pStats->total, allocationStatInfo);
7084  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
7085  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
7086  }
7087 }
7088 
7089 ////////////////////////////////////////////////////////////////////////////////
7090 // VmaDefragmentator members definition
7091 
7092 VmaDefragmentator::VmaDefragmentator(
7093  VmaAllocator hAllocator,
7094  VmaBlockVector* pBlockVector,
7095  uint32_t currentFrameIndex) :
7096  m_hAllocator(hAllocator),
7097  m_pBlockVector(pBlockVector),
7098  m_CurrentFrameIndex(currentFrameIndex),
7099  m_BytesMoved(0),
7100  m_AllocationsMoved(0),
7101  m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
7102  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
7103 {
7104 }
7105 
7106 VmaDefragmentator::~VmaDefragmentator()
7107 {
7108  for(size_t i = m_Blocks.size(); i--; )
7109  {
7110  vma_delete(m_hAllocator, m_Blocks[i]);
7111  }
7112 }
7113 
7114 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
7115 {
7116  AllocationInfo allocInfo;
7117  allocInfo.m_hAllocation = hAlloc;
7118  allocInfo.m_pChanged = pChanged;
7119  m_Allocations.push_back(allocInfo);
7120 }
7121 
7122 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
7123 {
7124  // It has already been mapped for defragmentation.
7125  if(m_pMappedDataForDefragmentation)
7126  {
7127  *ppMappedData = m_pMappedDataForDefragmentation;
7128  return VK_SUCCESS;
7129  }
7130 
7131  // The block is already mapped (e.g. persistently by the user).
7132  if(m_pBlock->GetMappedData())
7133  {
7134  *ppMappedData = m_pBlock->GetMappedData();
7135  return VK_SUCCESS;
7136  }
7137 
7138  // Map on first usage.
7139  VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
7140  *ppMappedData = m_pMappedDataForDefragmentation;
7141  return res;
7142 }
7143 
7144 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
7145 {
7146  if(m_pMappedDataForDefragmentation != VMA_NULL)
7147  {
7148  m_pBlock->Unmap(hAllocator, 1);
7149  }
7150 }
7151 
7152 VkResult VmaDefragmentator::DefragmentRound(
7153  VkDeviceSize maxBytesToMove,
7154  uint32_t maxAllocationsToMove)
7155 {
7156  if(m_Blocks.empty())
7157  {
7158  return VK_SUCCESS;
7159  }
7160 
7161  size_t srcBlockIndex = m_Blocks.size() - 1;
7162  size_t srcAllocIndex = SIZE_MAX;
7163  for(;;)
7164  {
7165  // 1. Find next allocation to move.
7166  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
7167  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
7168  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
7169  {
7170  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
7171  {
7172  // Finished: no more allocations to process.
7173  if(srcBlockIndex == 0)
7174  {
7175  return VK_SUCCESS;
7176  }
7177  else
7178  {
7179  --srcBlockIndex;
7180  srcAllocIndex = SIZE_MAX;
7181  }
7182  }
7183  else
7184  {
7185  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
7186  }
7187  }
7188 
7189  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
7190  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
7191 
7192  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
7193  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
7194  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
7195  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
7196 
7197  // 2. Try to find new place for this allocation in preceding or current block.
7198  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
7199  {
7200  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
7201  VmaAllocationRequest dstAllocRequest;
7202  if(pDstBlockInfo->m_pBlock->m_Metadata.CreateAllocationRequest(
7203  m_CurrentFrameIndex,
7204  m_pBlockVector->GetFrameInUseCount(),
7205  m_pBlockVector->GetBufferImageGranularity(),
7206  size,
7207  alignment,
7208  suballocType,
7209  false, // canMakeOtherLost
7210  &dstAllocRequest) &&
7211  MoveMakesSense(
7212  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
7213  {
7214  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
7215 
7216  // Reached limit on number of allocations or bytes to move.
7217  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
7218  (m_BytesMoved + size > maxBytesToMove))
7219  {
7220  return VK_INCOMPLETE;
7221  }
7222 
7223  void* pDstMappedData = VMA_NULL;
7224  VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
7225  if(res != VK_SUCCESS)
7226  {
7227  return res;
7228  }
7229 
7230  void* pSrcMappedData = VMA_NULL;
7231  res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
7232  if(res != VK_SUCCESS)
7233  {
7234  return res;
7235  }
7236 
7237  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
7238  memcpy(
7239  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
7240  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
7241  static_cast<size_t>(size));
7242 
7243  pDstBlockInfo->m_pBlock->m_Metadata.Alloc(dstAllocRequest, suballocType, size, allocInfo.m_hAllocation);
7244  pSrcBlockInfo->m_pBlock->m_Metadata.FreeAtOffset(srcOffset);
7245 
7246  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
7247 
7248  if(allocInfo.m_pChanged != VMA_NULL)
7249  {
7250  *allocInfo.m_pChanged = VK_TRUE;
7251  }
7252 
7253  ++m_AllocationsMoved;
7254  m_BytesMoved += size;
7255 
7256  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
7257 
7258  break;
7259  }
7260  }
7261 
7262  // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.
7263 
7264  if(srcAllocIndex > 0)
7265  {
7266  --srcAllocIndex;
7267  }
7268  else
7269  {
7270  if(srcBlockIndex > 0)
7271  {
7272  --srcBlockIndex;
7273  srcAllocIndex = SIZE_MAX;
7274  }
7275  else
7276  {
7277  return VK_SUCCESS;
7278  }
7279  }
7280  }
7281 }
7282 
7283 VkResult VmaDefragmentator::Defragment(
7284  VkDeviceSize maxBytesToMove,
7285  uint32_t maxAllocationsToMove)
7286 {
7287  if(m_Allocations.empty())
7288  {
7289  return VK_SUCCESS;
7290  }
7291 
7292  // Create block info for each block.
7293  const size_t blockCount = m_pBlockVector->m_Blocks.size();
7294  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
7295  {
7296  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
7297  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
7298  m_Blocks.push_back(pBlockInfo);
7299  }
7300 
7301  // Sort them by m_pBlock pointer value.
7302  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
7303 
7304  // Move allocation infos from m_Allocations to the appropriate m_Blocks[blockIndex].m_Allocations.
7305  for(size_t blockIndex = 0, allocCount = m_Allocations.size(); blockIndex < allocCount; ++blockIndex)
7306  {
7307  AllocationInfo& allocInfo = m_Allocations[blockIndex];
7308  // Now that we are inside VmaBlockVector::m_Mutex, we can make a final check whether this allocation was not lost.
7309  if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
7310  {
7311  VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
7312  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
7313  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
7314  {
7315  (*it)->m_Allocations.push_back(allocInfo);
7316  }
7317  else
7318  {
7319  VMA_ASSERT(0);
7320  }
7321  }
7322  }
7323  m_Allocations.clear();
7324 
7325  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
7326  {
7327  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
7328  pBlockInfo->CalcHasNonMovableAllocations();
7329  pBlockInfo->SortAllocationsBySizeDescecnding();
7330  }
7331 
7332  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
7333  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
7334 
7335  // Execute defragmentation rounds (the main part).
7336  VkResult result = VK_SUCCESS;
7337  for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
7338  {
7339  result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
7340  }
7341 
7342  // Unmap blocks that were mapped for defragmentation.
7343  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
7344  {
7345  m_Blocks[blockIndex]->Unmap(m_hAllocator);
7346  }
7347 
7348  return result;
7349 }
7350 
7351 bool VmaDefragmentator::MoveMakesSense(
7352  size_t dstBlockIndex, VkDeviceSize dstOffset,
7353  size_t srcBlockIndex, VkDeviceSize srcOffset)
7354 {
7355  if(dstBlockIndex < srcBlockIndex)
7356  {
7357  return true;
7358  }
7359  if(dstBlockIndex > srcBlockIndex)
7360  {
7361  return false;
7362  }
7363  if(dstOffset < srcOffset)
7364  {
7365  return true;
7366  }
7367  return false;
7368 }
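
// MoveMakesSense treats (blockIndex, offset) as a lexicographic position and permits a
// move only to a strictly smaller one. This compacts data toward the front of m_Blocks
// (the most "destination" blocks after sorting) and rules out cycles: within a round,
// no allocation can ever move back to a place it previously occupied.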
7369 
7370 ////////////////////////////////////////////////////////////////////////////////
7371 // VmaAllocator_T
7372 
7373 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
7374  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
7375  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
7376  m_hDevice(pCreateInfo->device),
7377  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
7378  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
7379  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
7380  m_PreferredLargeHeapBlockSize(0),
7381  m_PhysicalDevice(pCreateInfo->physicalDevice),
7382  m_CurrentFrameIndex(0),
7383  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks()))
7384 {
7385  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
7386 
7387  memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
7388  memset(&m_MemProps, 0, sizeof(m_MemProps));
7389  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
7390 
7391  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
7392  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
7393 
7394  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
7395  {
7396  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
7397  }
7398 
7399  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
7400  {
7401  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
7402  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
7403  }
7404 
7405  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
7406 
7407  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
7408  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
7409 
7410  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
7411  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
7412 
7413  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
7414  {
7415  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
7416  {
7417  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
7418  if(limit != VK_WHOLE_SIZE)
7419  {
7420  m_HeapSizeLimit[heapIndex] = limit;
7421  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
7422  {
7423  m_MemProps.memoryHeaps[heapIndex].size = limit;
7424  }
7425  }
7426  }
7427  }
7428 
7429  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7430  {
7431  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
7432 
7433  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
7434  this,
7435  memTypeIndex,
7436  preferredBlockSize,
7437  0,
7438  SIZE_MAX,
7439  GetBufferImageGranularity(),
7440  pCreateInfo->frameInUseCount,
7441  false); // isCustomPool
7442  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
7443  // because minBlockCount is 0.
7444  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
7445  }
7446 }
7447 
7448 VmaAllocator_T::~VmaAllocator_T()
7449 {
7450  VMA_ASSERT(m_Pools.empty());
7451 
7452  for(size_t i = GetMemoryTypeCount(); i--; )
7453  {
7454  vma_delete(this, m_pDedicatedAllocations[i]);
7455  vma_delete(this, m_pBlockVectors[i]);
7456  }
7457 }
7458 
7459 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
7460 {
7461 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
7462  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
7463  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
7464  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
7465  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
7466  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
7467  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
7468  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
7469  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
7470  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
7471  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
7472  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
7473  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
7474  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
7475  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
7476  if(m_UseKhrDedicatedAllocation)
7477  {
7478  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
7479  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
7480  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
7481  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
7482  }
7483 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
7484 
7485 #define VMA_COPY_IF_NOT_NULL(funcName) \
7486  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
7487 
7488  if(pVulkanFunctions != VMA_NULL)
7489  {
7490  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
7491  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
7492  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
7493  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
7494  VMA_COPY_IF_NOT_NULL(vkMapMemory);
7495  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
7496  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
7497  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
7498  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
7499  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
7500  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
7501  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
7502  VMA_COPY_IF_NOT_NULL(vkCreateImage);
7503  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
7504  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
7505  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
7506  }
7507 
7508 #undef VMA_COPY_IF_NOT_NULL
7509 
7510  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
7511  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
7512  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
7513  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
7514  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
7515  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
7516  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
7517  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
7518  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
7519  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
7520  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
7521  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
7522  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
7523  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
7524  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
7525  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
7526  if(m_UseKhrDedicatedAllocation)
7527  {
7528  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
7529  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
7530  }
7531 }
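// Illustrative sketch (not part of the library): when VMA_STATIC_VULKAN_FUNCTIONS
// is defined to 0, the application must satisfy the asserts above by filling
// VmaVulkanFunctions itself. Shown here with the statically linked entry points
// for brevity; a loader-based application would instead assign pointers obtained
// from vkGetInstanceProcAddr/vkGetDeviceProcAddr.
static void ExampleFillVulkanFunctions(VmaAllocatorCreateInfo& createInfo, VmaVulkanFunctions& funcs)
{
    funcs.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
    funcs.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
    funcs.vkAllocateMemory = &vkAllocateMemory;
    funcs.vkFreeMemory = &vkFreeMemory;
    funcs.vkMapMemory = &vkMapMemory;
    funcs.vkUnmapMemory = &vkUnmapMemory;
    funcs.vkBindBufferMemory = &vkBindBufferMemory;
    funcs.vkBindImageMemory = &vkBindImageMemory;
    funcs.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
    funcs.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
    funcs.vkCreateBuffer = &vkCreateBuffer;
    funcs.vkDestroyBuffer = &vkDestroyBuffer;
    funcs.vkCreateImage = &vkCreateImage;
    funcs.vkDestroyImage = &vkDestroyImage;
    createInfo.pVulkanFunctions = &funcs;
}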
7532 
7533 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
7534 {
7535  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
7536  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
7537  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
7538  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
7539 }
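// Worked example for the block-size heuristic above, assuming this file's default
// constants: a 256 MiB heap (typical for some integrated GPUs) counts as small,
// so its preferred block size is 256 MiB / 8 = 32 MiB; an 8 GiB discrete-GPU heap
// uses m_PreferredLargeHeapBlockSize, i.e. VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE unless
// overridden via VmaAllocatorCreateInfo::preferredLargeHeapBlockSize.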
7540 
7541 VkResult VmaAllocator_T::AllocateMemoryOfType(
7542  const VkMemoryRequirements& vkMemReq,
7543  bool dedicatedAllocation,
7544  VkBuffer dedicatedBuffer,
7545  VkImage dedicatedImage,
7546  const VmaAllocationCreateInfo& createInfo,
7547  uint32_t memTypeIndex,
7548  VmaSuballocationType suballocType,
7549  VmaAllocation* pAllocation)
7550 {
7551  VMA_ASSERT(pAllocation != VMA_NULL);
7552  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
7553 
7554  VmaAllocationCreateInfo finalCreateInfo = createInfo;
7555 
7556  // If memory type is not HOST_VISIBLE, disable MAPPED.
7557  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
7558  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
7559  {
7560  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
7561  }
7562 
7563  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
7564  VMA_ASSERT(blockVector);
7565 
7566  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
7567  bool preferDedicatedMemory =
7568  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
7569  dedicatedAllocation ||
7570  // Heuristic: Allocate dedicated memory if requested size is greater than half of preferred block size.
7571  vkMemReq.size > preferredBlockSize / 2;
7572 
7573  if(preferDedicatedMemory &&
7574  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
7575  finalCreateInfo.pool == VK_NULL_HANDLE)
7576  {
7577  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
7578  }
7579 
7580  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
7581  {
7582  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7583  {
7584  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7585  }
7586  else
7587  {
7588  return AllocateDedicatedMemory(
7589  vkMemReq.size,
7590  suballocType,
7591  memTypeIndex,
7592  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
7593  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
7594  finalCreateInfo.pUserData,
7595  dedicatedBuffer,
7596  dedicatedImage,
7597  pAllocation);
7598  }
7599  }
7600  else
7601  {
7602  VkResult res = blockVector->Allocate(
7603  VK_NULL_HANDLE, // hCurrentPool
7604  m_CurrentFrameIndex.load(),
7605  vkMemReq,
7606  finalCreateInfo,
7607  suballocType,
7608  pAllocation);
7609  if(res == VK_SUCCESS)
7610  {
7611  return res;
7612  }
7613 
7614  // Try dedicated memory.
7615  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7616  {
7617  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7618  }
7619  else
7620  {
7621  res = AllocateDedicatedMemory(
7622  vkMemReq.size,
7623  suballocType,
7624  memTypeIndex,
7625  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
7626  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
7627  finalCreateInfo.pUserData,
7628  dedicatedBuffer,
7629  dedicatedImage,
7630  pAllocation);
7631  if(res == VK_SUCCESS)
7632  {
7633  // Succeeded: AllocateDedicatedMemory function already filled pAllocation, nothing more to do here.
7634  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
7635  return VK_SUCCESS;
7636  }
7637  else
7638  {
7639  // Everything failed: Return error code.
7640  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
7641  return res;
7642  }
7643  }
7644  }
7645 }
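// Illustrative sketch (not part of the library): forcing the dedicated-memory path
// above with VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT. The buffer is assumed to
// have been created by the application already.
static VkResult ExampleAllocateDedicatedForBuffer(
    VmaAllocator allocator, VkBuffer buffer, VmaAllocation* pAllocation)
{
    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
    // Skips block suballocation entirely and goes straight to AllocateDedicatedMemory.
    return vmaAllocateMemoryForBuffer(allocator, buffer, &allocCreateInfo, pAllocation, VMA_NULL);
}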
7646 
7647 VkResult VmaAllocator_T::AllocateDedicatedMemory(
7648  VkDeviceSize size,
7649  VmaSuballocationType suballocType,
7650  uint32_t memTypeIndex,
7651  bool map,
7652  bool isUserDataString,
7653  void* pUserData,
7654  VkBuffer dedicatedBuffer,
7655  VkImage dedicatedImage,
7656  VmaAllocation* pAllocation)
7657 {
7658  VMA_ASSERT(pAllocation);
7659 
7660  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
7661  allocInfo.memoryTypeIndex = memTypeIndex;
7662  allocInfo.allocationSize = size;
7663 
7664  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
7665  if(m_UseKhrDedicatedAllocation)
7666  {
7667  if(dedicatedBuffer != VK_NULL_HANDLE)
7668  {
7669  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
7670  dedicatedAllocInfo.buffer = dedicatedBuffer;
7671  allocInfo.pNext = &dedicatedAllocInfo;
7672  }
7673  else if(dedicatedImage != VK_NULL_HANDLE)
7674  {
7675  dedicatedAllocInfo.image = dedicatedImage;
7676  allocInfo.pNext = &dedicatedAllocInfo;
7677  }
7678  }
7679 
7680  // Allocate VkDeviceMemory.
7681  VkDeviceMemory hMemory = VK_NULL_HANDLE;
7682  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
7683  if(res < 0)
7684  {
7685  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
7686  return res;
7687  }
7688 
7689  void* pMappedData = VMA_NULL;
7690  if(map)
7691  {
7692  res = (*m_VulkanFunctions.vkMapMemory)(
7693  m_hDevice,
7694  hMemory,
7695  0,
7696  VK_WHOLE_SIZE,
7697  0,
7698  &pMappedData);
7699  if(res < 0)
7700  {
7701  VMA_DEBUG_LOG(" vkMapMemory FAILED");
7702  FreeVulkanMemory(memTypeIndex, size, hMemory);
7703  return res;
7704  }
7705  }
7706 
7707  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
7708  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
7709  (*pAllocation)->SetUserData(this, pUserData);
7710 
7711  // Register it in m_pDedicatedAllocations.
7712  {
7713  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
7714  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
7715  VMA_ASSERT(pDedicatedAllocations);
7716  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
7717  }
7718 
7719  VMA_DEBUG_LOG(" Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);
7720 
7721  return VK_SUCCESS;
7722 }
7723 
7724 void VmaAllocator_T::GetBufferMemoryRequirements(
7725  VkBuffer hBuffer,
7726  VkMemoryRequirements& memReq,
7727  bool& requiresDedicatedAllocation,
7728  bool& prefersDedicatedAllocation) const
7729 {
7730  if(m_UseKhrDedicatedAllocation)
7731  {
7732  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
7733  memReqInfo.buffer = hBuffer;
7734 
7735  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
7736 
7737  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
7738  memReq2.pNext = &memDedicatedReq;
7739 
7740  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
7741 
7742  memReq = memReq2.memoryRequirements;
7743  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
7744  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
7745  }
7746  else
7747  {
7748  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
7749  requiresDedicatedAllocation = false;
7750  prefersDedicatedAllocation = false;
7751  }
7752 }
7753 
7754 void VmaAllocator_T::GetImageMemoryRequirements(
7755  VkImage hImage,
7756  VkMemoryRequirements& memReq,
7757  bool& requiresDedicatedAllocation,
7758  bool& prefersDedicatedAllocation) const
7759 {
7760  if(m_UseKhrDedicatedAllocation)
7761  {
7762  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
7763  memReqInfo.image = hImage;
7764 
7765  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
7766 
7767  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
7768  memReq2.pNext = &memDedicatedReq;
7769 
7770  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
7771 
7772  memReq = memReq2.memoryRequirements;
7773  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
7774  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
7775  }
7776  else
7777  {
7778  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
7779  requiresDedicatedAllocation = false;
7780  prefersDedicatedAllocation = false;
7781  }
7782 }
7783 
7784 VkResult VmaAllocator_T::AllocateMemory(
7785  const VkMemoryRequirements& vkMemReq,
7786  bool requiresDedicatedAllocation,
7787  bool prefersDedicatedAllocation,
7788  VkBuffer dedicatedBuffer,
7789  VkImage dedicatedImage,
7790  const VmaAllocationCreateInfo& createInfo,
7791  VmaSuballocationType suballocType,
7792  VmaAllocation* pAllocation)
7793 {
7794  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
7795  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7796  {
7797  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
7798  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7799  }
7800  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
7801  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
7802  {
7803  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
7804  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7805  }
7806  if(requiresDedicatedAllocation)
7807  {
7808  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7809  {
7810  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
7811  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7812  }
7813  if(createInfo.pool != VK_NULL_HANDLE)
7814  {
7815  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
7816  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7817  }
7818  }
7819  if((createInfo.pool != VK_NULL_HANDLE) &&
7820  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
7821  {
7822  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
7823  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7824  }
7825 
7826  if(createInfo.pool != VK_NULL_HANDLE)
7827  {
7828  return createInfo.pool->m_BlockVector.Allocate(
7829  createInfo.pool,
7830  m_CurrentFrameIndex.load(),
7831  vkMemReq,
7832  createInfo,
7833  suballocType,
7834  pAllocation);
7835  }
7836  else
7837  {
7838  // Bit mask of Vulkan memory types acceptable for this allocation.
7839  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
7840  uint32_t memTypeIndex = UINT32_MAX;
7841  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
7842  if(res == VK_SUCCESS)
7843  {
7844  res = AllocateMemoryOfType(
7845  vkMemReq,
7846  requiresDedicatedAllocation || prefersDedicatedAllocation,
7847  dedicatedBuffer,
7848  dedicatedImage,
7849  createInfo,
7850  memTypeIndex,
7851  suballocType,
7852  pAllocation);
7853  // Succeeded on first try.
7854  if(res == VK_SUCCESS)
7855  {
7856  return res;
7857  }
7858  // Allocation from this memory type failed. Try other compatible memory types.
7859  else
7860  {
7861  for(;;)
7862  {
7863  // Remove old memTypeIndex from list of possibilities.
7864  memoryTypeBits &= ~(1u << memTypeIndex);
7865  // Find alternative memTypeIndex.
7866  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
7867  if(res == VK_SUCCESS)
7868  {
7869  res = AllocateMemoryOfType(
7870  vkMemReq,
7871  requiresDedicatedAllocation || prefersDedicatedAllocation,
7872  dedicatedBuffer,
7873  dedicatedImage,
7874  createInfo,
7875  memTypeIndex,
7876  suballocType,
7877  pAllocation);
7878  // Allocation from this alternative memory type succeeded.
7879  if(res == VK_SUCCESS)
7880  {
7881  return res;
7882  }
7883  // else: Allocation from this memory type failed. Try next one - next loop iteration.
7884  }
7885  // No other matching memory type index could be found.
7886  else
7887  {
7888  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
7889  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7890  }
7891  }
7892  }
7893  }
7894  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
7895  else
7896  return res;
7897  }
7898 }
7899 
7900 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
7901 {
7902  VMA_ASSERT(allocation);
7903 
7904  if(allocation->CanBecomeLost() == false ||
7905  allocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
7906  {
7907  switch(allocation->GetType())
7908  {
7909  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
7910  {
7911  VmaBlockVector* pBlockVector = VMA_NULL;
7912  VmaPool hPool = allocation->GetPool();
7913  if(hPool != VK_NULL_HANDLE)
7914  {
7915  pBlockVector = &hPool->m_BlockVector;
7916  }
7917  else
7918  {
7919  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
7920  pBlockVector = m_pBlockVectors[memTypeIndex];
7921  }
7922  pBlockVector->Free(allocation);
7923  }
7924  break;
7925  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
7926  FreeDedicatedMemory(allocation);
7927  break;
7928  default:
7929  VMA_ASSERT(0);
7930  }
7931  }
7932 
7933  allocation->SetUserData(this, VMA_NULL);
7934  vma_delete(this, allocation);
7935 }
7936 
7937 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
7938 {
7939  // Initialize.
7940  InitStatInfo(pStats->total);
7941  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
7942  InitStatInfo(pStats->memoryType[i]);
7943  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
7944  InitStatInfo(pStats->memoryHeap[i]);
7945 
7946  // Process default pools.
7947  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7948  {
7949  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
7950  VMA_ASSERT(pBlockVector);
7951  pBlockVector->AddStats(pStats);
7952  }
7953 
7954  // Process custom pools.
7955  {
7956  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7957  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
7958  {
7959  m_Pools[poolIndex]->GetBlockVector().AddStats(pStats);
7960  }
7961  }
7962 
7963  // Process dedicated allocations.
7964  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7965  {
7966  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
7967  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
7968  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
7969  VMA_ASSERT(pDedicatedAllocVector);
7970  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
7971  {
7972  VmaStatInfo allocationStatInfo;
7973  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
7974  VmaAddStatInfo(pStats->total, allocationStatInfo);
7975  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
7976  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
7977  }
7978  }
7979 
7980  // Postprocess.
7981  VmaPostprocessCalcStatInfo(pStats->total);
7982  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
7983  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
7984  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
7985  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
7986 }
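// Illustrative sketch (not part of the library): reading the totals aggregated by
// CalculateStats above through the public vmaCalculateStats entry point.
static void ExampleQueryTotalUsage(
    VmaAllocator allocator, VkDeviceSize* pUsedBytes, uint32_t* pAllocationCount)
{
    VmaStats stats;
    vmaCalculateStats(allocator, &stats);
    // stats.total spans all memory types and heaps; per-type and per-heap numbers
    // are in stats.memoryType[] and stats.memoryHeap[].
    *pUsedBytes = stats.total.usedBytes;
    *pAllocationCount = stats.total.allocationCount;
}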
7987 
7988 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
7989 
7990 VkResult VmaAllocator_T::Defragment(
7991  VmaAllocation* pAllocations,
7992  size_t allocationCount,
7993  VkBool32* pAllocationsChanged,
7994  const VmaDefragmentationInfo* pDefragmentationInfo,
7995  VmaDefragmentationStats* pDefragmentationStats)
7996 {
7997  if(pAllocationsChanged != VMA_NULL)
7998  {
7999  memset(pAllocationsChanged, 0, sizeof(*pAllocationsChanged));
8000  }
8001  if(pDefragmentationStats != VMA_NULL)
8002  {
8003  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
8004  }
8005 
8006  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
8007 
8008  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
8009 
8010  const size_t poolCount = m_Pools.size();
8011 
8012  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
8013  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
8014  {
8015  VmaAllocation hAlloc = pAllocations[allocIndex];
8016  VMA_ASSERT(hAlloc);
8017  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
8018  // DedicatedAlloc cannot be defragmented.
8019  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
8020  // Only HOST_VISIBLE memory types can be defragmented.
8021  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) &&
8022  // Lost allocation cannot be defragmented.
8023  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
8024  {
8025  VmaBlockVector* pAllocBlockVector = VMA_NULL;
8026 
8027  const VmaPool hAllocPool = hAlloc->GetPool();
8028  // This allocation belongs to a custom pool.
8029  if(hAllocPool != VK_NULL_HANDLE)
8030  {
8031  pAllocBlockVector = &hAllocPool->GetBlockVector();
8032  }
8033  // This allocation belongs to the general pool.
8034  else
8035  {
8036  pAllocBlockVector = m_pBlockVectors[memTypeIndex];
8037  }
8038 
8039  VmaDefragmentator* const pDefragmentator = pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
8040 
8041  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
8042  &pAllocationsChanged[allocIndex] : VMA_NULL;
8043  pDefragmentator->AddAllocation(hAlloc, pChanged);
8044  }
8045  }
8046 
8047  VkResult result = VK_SUCCESS;
8048 
8049  // ======== Main processing.
8050 
8051  VkDeviceSize maxBytesToMove = SIZE_MAX;
8052  uint32_t maxAllocationsToMove = UINT32_MAX;
8053  if(pDefragmentationInfo != VMA_NULL)
8054  {
8055  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
8056  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
8057  }
8058 
8059  // Process standard memory.
8060  for(uint32_t memTypeIndex = 0;
8061  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
8062  ++memTypeIndex)
8063  {
8064  // Only HOST_VISIBLE memory types can be defragmented.
8065  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
8066  {
8067  result = m_pBlockVectors[memTypeIndex]->Defragment(
8068  pDefragmentationStats,
8069  maxBytesToMove,
8070  maxAllocationsToMove);
8071  }
8072  }
8073 
8074  // Process custom pools.
8075  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
8076  {
8077  result = m_Pools[poolIndex]->GetBlockVector().Defragment(
8078  pDefragmentationStats,
8079  maxBytesToMove,
8080  maxAllocationsToMove);
8081  }
8082 
8083  // ======== Destroy defragmentators.
8084 
8085  // Process custom pools.
8086  for(size_t poolIndex = poolCount; poolIndex--; )
8087  {
8088  m_Pools[poolIndex]->GetBlockVector().DestroyDefragmentator();
8089  }
8090 
8091  // Process standard memory.
8092  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
8093  {
8094  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
8095  {
8096  m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
8097  }
8098  }
8099 
8100  return result;
8101 }
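// Illustrative sketch (not part of the library): a budgeted vmaDefragment call over
// existing allocations. Entries whose pAllocationsChanged slot comes back VK_TRUE
// were moved, so the application must re-create/re-bind and re-fill the buffers or
// images that used them.
static VkResult ExampleDefragment(
    VmaAllocator allocator,
    VmaAllocation* pAllocations, size_t allocationCount,
    VkBool32* pAllocationsChanged)
{
    VmaDefragmentationInfo defragInfo = {};
    defragInfo.maxBytesToMove = 64ull * 1024 * 1024; // Move at most 64 MiB.
    defragInfo.maxAllocationsToMove = 128;
    VmaDefragmentationStats stats = {};
    return vmaDefragment(allocator, pAllocations, allocationCount,
        pAllocationsChanged, &defragInfo, &stats);
}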
8102 
8103 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
8104 {
8105  if(hAllocation->CanBecomeLost())
8106  {
8107  /*
8108  Warning: This is a carefully designed algorithm.
8109  Do not modify unless you really know what you're doing :)
8110  */
8111  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
8112  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
8113  for(;;)
8114  {
8115  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
8116  {
8117  pAllocationInfo->memoryType = UINT32_MAX;
8118  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
8119  pAllocationInfo->offset = 0;
8120  pAllocationInfo->size = hAllocation->GetSize();
8121  pAllocationInfo->pMappedData = VMA_NULL;
8122  pAllocationInfo->pUserData = hAllocation->GetUserData();
8123  return;
8124  }
8125  else if(localLastUseFrameIndex == localCurrFrameIndex)
8126  {
8127  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
8128  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
8129  pAllocationInfo->offset = hAllocation->GetOffset();
8130  pAllocationInfo->size = hAllocation->GetSize();
8131  pAllocationInfo->pMappedData = VMA_NULL;
8132  pAllocationInfo->pUserData = hAllocation->GetUserData();
8133  return;
8134  }
8135  else // Last use time earlier than current time.
8136  {
8137  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
8138  {
8139  localLastUseFrameIndex = localCurrFrameIndex;
8140  }
8141  }
8142  }
8143  }
8144  else
8145  {
8146  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
8147  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
8148  pAllocationInfo->offset = hAllocation->GetOffset();
8149  pAllocationInfo->size = hAllocation->GetSize();
8150  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
8151  pAllocationInfo->pUserData = hAllocation->GetUserData();
8152  }
8153 }
8154 
8155 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
8156 {
8157  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
8158  if(hAllocation->CanBecomeLost())
8159  {
8160  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
8161  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
8162  for(;;)
8163  {
8164  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
8165  {
8166  return false;
8167  }
8168  else if(localLastUseFrameIndex == localCurrFrameIndex)
8169  {
8170  return true;
8171  }
8172  else // Last use time earlier than current time.
8173  {
8174  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
8175  {
8176  localLastUseFrameIndex = localCurrFrameIndex;
8177  }
8178  }
8179  }
8180  }
8181  else
8182  {
8183  return true;
8184  }
8185 }
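// Illustrative sketch (not part of the library): the per-frame pattern for
// allocations created with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT. Touching an
// allocation marks it as used in the current frame and reports whether it is
// still alive.
static bool ExampleUseOrRecreate(VmaAllocator allocator, VmaAllocation allocation)
{
    if(vmaTouchAllocation(allocator, allocation) == VK_TRUE)
    {
        return true; // Still alive: safe to use this frame.
    }
    // Lost: the caller should free this allocation and create a replacement.
    return false;
}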
8186 
8187 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
8188 {
8189  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u", pCreateInfo->memoryTypeIndex);
8190 
8191  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
8192 
8193  if(newCreateInfo.maxBlockCount == 0)
8194  {
8195  newCreateInfo.maxBlockCount = SIZE_MAX;
8196  }
8197  if(newCreateInfo.blockSize == 0)
8198  {
8199  newCreateInfo.blockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
8200  }
8201 
8202  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo);
8203 
8204  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
8205  if(res != VK_SUCCESS)
8206  {
8207  vma_delete(this, *pPool);
8208  *pPool = VMA_NULL;
8209  return res;
8210  }
8211 
8212  // Add to m_Pools.
8213  {
8214  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
8215  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
8216  }
8217 
8218  return VK_SUCCESS;
8219 }
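// Illustrative sketch (not part of the library): creating a custom pool through
// CreatePool above. memoryTypeIndex is assumed to have been chosen beforehand,
// e.g. with vmaFindMemoryTypeIndex.
static VkResult ExampleCreatePool(
    VmaAllocator allocator, uint32_t memoryTypeIndex, VmaPool* pPool)
{
    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memoryTypeIndex;
    poolCreateInfo.blockSize = 64ull * 1024 * 1024; // 64 MiB per block.
    poolCreateInfo.minBlockCount = 1; // Pre-allocate one block (CreateMinBlocks above).
    poolCreateInfo.maxBlockCount = 8; // Cap the pool at 8 blocks = 512 MiB.
    return vmaCreatePool(allocator, &poolCreateInfo, pPool);
}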
8220 
8221 void VmaAllocator_T::DestroyPool(VmaPool pool)
8222 {
8223  // Remove from m_Pools.
8224  {
8225  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
8226  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
8227  VMA_ASSERT(success && "Pool not found in Allocator.");
8228  }
8229 
8230  vma_delete(this, pool);
8231 }
8232 
8233 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
8234 {
8235  pool->m_BlockVector.GetPoolStats(pPoolStats);
8236 }
8237 
8238 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
8239 {
8240  m_CurrentFrameIndex.store(frameIndex);
8241 }
8242 
8243 void VmaAllocator_T::MakePoolAllocationsLost(
8244  VmaPool hPool,
8245  size_t* pLostAllocationCount)
8246 {
8247  hPool->m_BlockVector.MakePoolAllocationsLost(
8248  m_CurrentFrameIndex.load(),
8249  pLostAllocationCount);
8250 }
8251 
8252 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
8253 {
8254  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
8255  (*pAllocation)->InitLost();
8256 }
8257 
8258 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
8259 {
8260  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
8261 
8262  VkResult res;
8263  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
8264  {
8265  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
8266  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
8267  {
8268  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
8269  if(res == VK_SUCCESS)
8270  {
8271  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
8272  }
8273  }
8274  else
8275  {
8276  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
8277  }
8278  }
8279  else
8280  {
8281  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
8282  }
8283 
8284  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
8285  {
8286  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
8287  }
8288 
8289  return res;
8290 }
8291 
8292 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
8293 {
8294  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
8295  {
8296  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
8297  }
8298 
8299  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
8300 
8301  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
8302  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
8303  {
8304  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
8305  m_HeapSizeLimit[heapIndex] += size;
8306  }
8307 }
8308 
8309 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
8310 {
8311  if(hAllocation->CanBecomeLost())
8312  {
8313  return VK_ERROR_MEMORY_MAP_FAILED;
8314  }
8315 
8316  switch(hAllocation->GetType())
8317  {
8318  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
8319  {
8320  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
8321  char *pBytes = VMA_NULL;
8322  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
8323  if(res == VK_SUCCESS)
8324  {
8325  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
8326  hAllocation->BlockAllocMap();
8327  }
8328  return res;
8329  }
8330  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
8331  return hAllocation->DedicatedAllocMap(this, ppData);
8332  default:
8333  VMA_ASSERT(0);
8334  return VK_ERROR_MEMORY_MAP_FAILED;
8335  }
8336 }
8337 
8338 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
8339 {
8340  switch(hAllocation->GetType())
8341  {
8342  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
8343  {
8344  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
8345  hAllocation->BlockAllocUnmap();
8346  pBlock->Unmap(this, 1);
8347  }
8348  break;
8349  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
8350  hAllocation->DedicatedAllocUnmap(this);
8351  break;
8352  default:
8353  VMA_ASSERT(0);
8354  }
8355 }
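// Illustrative sketch (not part of the library): copying data through the Map/Unmap
// pair above via the public API. Mapping only succeeds for allocations in
// HOST_VISIBLE memory that cannot become lost.
static VkResult ExampleUpload(
    VmaAllocator allocator, VmaAllocation allocation, const void* pSrcData, size_t size)
{
    void* pData = VMA_NULL;
    VkResult res = vmaMapMemory(allocator, allocation, &pData);
    if(res == VK_SUCCESS)
    {
        memcpy(pData, pSrcData, size);
        vmaUnmapMemory(allocator, allocation);
    }
    return res;
}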
8356 
8357 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
8358 {
8359  VkResult res = VK_SUCCESS;
8360  switch(hAllocation->GetType())
8361  {
8362  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
8363  res = GetVulkanFunctions().vkBindBufferMemory(
8364  m_hDevice,
8365  hBuffer,
8366  hAllocation->GetMemory(),
8367  0); //memoryOffset
8368  break;
8369  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
8370  {
8371  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
8372  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
8373  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
8374  break;
8375  }
8376  default:
8377  VMA_ASSERT(0);
8378  }
8379  return res;
8380 }
8381 
8382 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
8383 {
8384  VkResult res = VK_SUCCESS;
8385  switch(hAllocation->GetType())
8386  {
8387  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
8388  res = GetVulkanFunctions().vkBindImageMemory(
8389  m_hDevice,
8390  hImage,
8391  hAllocation->GetMemory(),
8392  0); //memoryOffset
8393  break;
8394  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
8395  {
8396  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
8397  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
8398  res = pBlock->BindImageMemory(this, hAllocation, hImage);
8399  break;
8400  }
8401  default:
8402  VMA_ASSERT(0);
8403  }
8404  return res;
8405 }
8406 
8407 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
8408 {
8409  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
8410 
8411  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
8412  {
8413  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
8414  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
8415  VMA_ASSERT(pDedicatedAllocations);
8416  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
8417  VMA_ASSERT(success);
8418  }
8419 
8420  VkDeviceMemory hMemory = allocation->GetMemory();
8421 
8422  if(allocation->GetMappedData() != VMA_NULL)
8423  {
8424  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
8425  }
8426 
8427  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
8428 
8429  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
8430 }
8431 
8432 #if VMA_STATS_STRING_ENABLED
8433 
8434 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
8435 {
8436  bool dedicatedAllocationsStarted = false;
8437  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
8438  {
8439  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
8440  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
8441  VMA_ASSERT(pDedicatedAllocVector);
8442  if(pDedicatedAllocVector->empty() == false)
8443  {
8444  if(dedicatedAllocationsStarted == false)
8445  {
8446  dedicatedAllocationsStarted = true;
8447  json.WriteString("DedicatedAllocations");
8448  json.BeginObject();
8449  }
8450 
8451  json.BeginString("Type ");
8452  json.ContinueString(memTypeIndex);
8453  json.EndString();
8454 
8455  json.BeginArray();
8456 
8457  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
8458  {
8459  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
8460  json.BeginObject(true);
8461 
8462  json.WriteString("Type");
8463  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[hAlloc->GetSuballocationType()]);
8464 
8465  json.WriteString("Size");
8466  json.WriteNumber(hAlloc->GetSize());
8467 
8468  const void* pUserData = hAlloc->GetUserData();
8469  if(pUserData != VMA_NULL)
8470  {
8471  json.WriteString("UserData");
8472  if(hAlloc->IsUserDataString())
8473  {
8474  json.WriteString((const char*)pUserData);
8475  }
8476  else
8477  {
8478  json.BeginString();
8479  json.ContinueString_Pointer(pUserData);
8480  json.EndString();
8481  }
8482  }
8483 
8484  json.EndObject();
8485  }
8486 
8487  json.EndArray();
8488  }
8489  }
8490  if(dedicatedAllocationsStarted)
8491  {
8492  json.EndObject();
8493  }
8494 
8495  {
8496  bool allocationsStarted = false;
8497  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
8498  {
8499  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
8500  {
8501  if(allocationsStarted == false)
8502  {
8503  allocationsStarted = true;
8504  json.WriteString("DefaultPools");
8505  json.BeginObject();
8506  }
8507 
8508  json.BeginString("Type ");
8509  json.ContinueString(memTypeIndex);
8510  json.EndString();
8511 
8512  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
8513  }
8514  }
8515  if(allocationsStarted)
8516  {
8517  json.EndObject();
8518  }
8519  }
8520 
8521  {
8522  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
8523  const size_t poolCount = m_Pools.size();
8524  if(poolCount > 0)
8525  {
8526  json.WriteString("Pools");
8527  json.BeginArray();
8528  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
8529  {
8530  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
8531  }
8532  json.EndArray();
8533  }
8534  }
8535 }
8536 
8537 #endif // #if VMA_STATS_STRING_ENABLED
8538 
8539 static VkResult AllocateMemoryForImage(
8540  VmaAllocator allocator,
8541  VkImage image,
8542  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8543  VmaSuballocationType suballocType,
8544  VmaAllocation* pAllocation)
8545 {
8546  VMA_ASSERT(allocator && (image != VK_NULL_HANDLE) && pAllocationCreateInfo && pAllocation);
8547 
8548  VkMemoryRequirements vkMemReq = {};
8549  bool requiresDedicatedAllocation = false;
8550  bool prefersDedicatedAllocation = false;
8551  allocator->GetImageMemoryRequirements(image, vkMemReq,
8552  requiresDedicatedAllocation, prefersDedicatedAllocation);
8553 
8554  return allocator->AllocateMemory(
8555  vkMemReq,
8556  requiresDedicatedAllocation,
8557  prefersDedicatedAllocation,
8558  VK_NULL_HANDLE, // dedicatedBuffer
8559  image, // dedicatedImage
8560  *pAllocationCreateInfo,
8561  suballocType,
8562  pAllocation);
8563 }
8564 
8565 ////////////////////////////////////////////////////////////////////////////////
8566 // Public interface
8567 
8568 VkResult vmaCreateAllocator(
8569  const VmaAllocatorCreateInfo* pCreateInfo,
8570  VmaAllocator* pAllocator)
8571 {
8572  VMA_ASSERT(pCreateInfo && pAllocator);
8573  VMA_DEBUG_LOG("vmaCreateAllocator");
8574  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
8575  return VK_SUCCESS;
8576 }
8577 
8578 void vmaDestroyAllocator(
8579  VmaAllocator allocator)
8580 {
8581  if(allocator != VK_NULL_HANDLE)
8582  {
8583  VMA_DEBUG_LOG("vmaDestroyAllocator");
8584  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
8585  vma_delete(&allocationCallbacks, allocator);
8586  }
8587 }
8588 
8589 void vmaGetPhysicalDeviceProperties(
8590  VmaAllocator allocator,
8591  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
8592 {
8593  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
8594  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
8595 }
8596 
8597 void vmaGetMemoryProperties(
8598  VmaAllocator allocator,
8599  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
8600 {
8601  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
8602  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
8603 }
8604 
8605 void vmaGetMemoryTypeProperties(
8606  VmaAllocator allocator,
8607  uint32_t memoryTypeIndex,
8608  VkMemoryPropertyFlags* pFlags)
8609 {
8610  VMA_ASSERT(allocator && pFlags);
8611  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
8612  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
8613 }
8614 
8615 void vmaSetCurrentFrameIndex(
8616  VmaAllocator allocator,
8617  uint32_t frameIndex)
8618 {
8619  VMA_ASSERT(allocator);
8620  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
8621 
8622  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8623 
8624  allocator->SetCurrentFrameIndex(frameIndex);
8625 }
8626 
8627 void vmaCalculateStats(
8628  VmaAllocator allocator,
8629  VmaStats* pStats)
8630 {
8631  VMA_ASSERT(allocator && pStats);
8632  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8633  allocator->CalculateStats(pStats);
8634 }
8635 
8636 #if VMA_STATS_STRING_ENABLED
8637 
8638 void vmaBuildStatsString(
8639  VmaAllocator allocator,
8640  char** ppStatsString,
8641  VkBool32 detailedMap)
8642 {
8643  VMA_ASSERT(allocator && ppStatsString);
8644  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8645 
8646  VmaStringBuilder sb(allocator);
8647  {
8648  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
8649  json.BeginObject();
8650 
8651  VmaStats stats;
8652  allocator->CalculateStats(&stats);
8653 
8654  json.WriteString("Total");
8655  VmaPrintStatInfo(json, stats.total);
8656 
8657  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
8658  {
8659  json.BeginString("Heap ");
8660  json.ContinueString(heapIndex);
8661  json.EndString();
8662  json.BeginObject();
8663 
8664  json.WriteString("Size");
8665  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
8666 
8667  json.WriteString("Flags");
8668  json.BeginArray(true);
8669  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
8670  {
8671  json.WriteString("DEVICE_LOCAL");
8672  }
8673  json.EndArray();
8674 
8675  if(stats.memoryHeap[heapIndex].blockCount > 0)
8676  {
8677  json.WriteString("Stats");
8678  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
8679  }
8680 
8681  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
8682  {
8683  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
8684  {
8685  json.BeginString("Type ");
8686  json.ContinueString(typeIndex);
8687  json.EndString();
8688 
8689  json.BeginObject();
8690 
8691  json.WriteString("Flags");
8692  json.BeginArray(true);
8693  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
8694  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
8695  {
8696  json.WriteString("DEVICE_LOCAL");
8697  }
8698  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
8699  {
8700  json.WriteString("HOST_VISIBLE");
8701  }
8702  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
8703  {
8704  json.WriteString("HOST_COHERENT");
8705  }
8706  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
8707  {
8708  json.WriteString("HOST_CACHED");
8709  }
8710  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
8711  {
8712  json.WriteString("LAZILY_ALLOCATED");
8713  }
8714  json.EndArray();
8715 
8716  if(stats.memoryType[typeIndex].blockCount > 0)
8717  {
8718  json.WriteString("Stats");
8719  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
8720  }
8721 
8722  json.EndObject();
8723  }
8724  }
8725 
8726  json.EndObject();
8727  }
8728  if(detailedMap == VK_TRUE)
8729  {
8730  allocator->PrintDetailedMap(json);
8731  }
8732 
8733  json.EndObject();
8734  }
8735 
8736  const size_t len = sb.GetLength();
8737  char* const pChars = vma_new_array(allocator, char, len + 1);
8738  if(len > 0)
8739  {
8740  memcpy(pChars, sb.GetData(), len);
8741  }
8742  pChars[len] = '\0';
8743  *ppStatsString = pChars;
8744 }
8745 
8746 void vmaFreeStatsString(
8747  VmaAllocator allocator,
8748  char* pStatsString)
8749 {
8750  if(pStatsString != VMA_NULL)
8751  {
8752  VMA_ASSERT(allocator);
8753  size_t len = strlen(pStatsString);
8754  vma_delete_array(allocator, pStatsString, len + 1);
8755  }
8756 }
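// Illustrative sketch (not part of the library): the usual round trip for the JSON
// statistics built above. The string must be released with vmaFreeStatsString
// because it was allocated through the allocator's own callbacks.
static void ExampleDumpStats(VmaAllocator allocator)
{
    char* pStatsString = VMA_NULL;
    vmaBuildStatsString(allocator, &pStatsString, VK_TRUE); // VK_TRUE: include detailed map.
    // ... write pStatsString to a log or file here ...
    vmaFreeStatsString(allocator, pStatsString);
}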
8757 
8758 #endif // #if VMA_STATS_STRING_ENABLED
8759 
8760 /*
8761 This function is not protected by any mutex because it just reads immutable data.
8762 */
8763 VkResult vmaFindMemoryTypeIndex(
8764  VmaAllocator allocator,
8765  uint32_t memoryTypeBits,
8766  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8767  uint32_t* pMemoryTypeIndex)
8768 {
8769  VMA_ASSERT(allocator != VK_NULL_HANDLE);
8770  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
8771  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
8772 
8773  if(pAllocationCreateInfo->memoryTypeBits != 0)
8774  {
8775  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
8776  }
8777 
8778  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
8779  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
8780 
8781  // Convert usage to requiredFlags and preferredFlags.
8782  switch(pAllocationCreateInfo->usage)
8783  {
8783  {
8784  case VMA_MEMORY_USAGE_UNKNOWN:
8785  break;
8786  case VMA_MEMORY_USAGE_GPU_ONLY:
8787  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
8788  break;
8789  case VMA_MEMORY_USAGE_CPU_ONLY:
8790  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
8791  break;
8792  case VMA_MEMORY_USAGE_CPU_TO_GPU:
8793  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
8794  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
8795  break;
8796  case VMA_MEMORY_USAGE_GPU_TO_CPU:
8797  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
8798  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
8799  break;
8800  default:
8801  break;
8802  }
8803 
8804  *pMemoryTypeIndex = UINT32_MAX;
8805  uint32_t minCost = UINT32_MAX;
8806  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
8807  memTypeIndex < allocator->GetMemoryTypeCount();
8808  ++memTypeIndex, memTypeBit <<= 1)
8809  {
8810  // This memory type is acceptable according to memoryTypeBits bitmask.
8811  if((memTypeBit & memoryTypeBits) != 0)
8812  {
8813  const VkMemoryPropertyFlags currFlags =
8814  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
8815  // This memory type contains requiredFlags.
8816  if((requiredFlags & ~currFlags) == 0)
8817  {
8818  // Calculate cost as number of bits from preferredFlags not present in this memory type.
8819  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
8820  // Remember memory type with lowest cost.
8821  if(currCost < minCost)
8822  {
8823  *pMemoryTypeIndex = memTypeIndex;
8824  if(currCost == 0)
8825  {
8826  return VK_SUCCESS;
8827  }
8828  minCost = currCost;
8829  }
8830  }
8831  }
8832  }
8833  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
8834 }
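// Illustrative sketch (not part of the library): choosing a memory type for a
// staging buffer with the cost-based search above. UINT32_MAX stands in for the
// memoryTypeBits that would normally come from vkGetBufferMemoryRequirements,
// meaning "any memory type allowed".
static VkResult ExampleFindStagingMemoryType(
    VmaAllocator allocator, uint32_t* pMemoryTypeIndex)
{
    VmaAllocationCreateInfo allocCreateInfo = {};
    // CPU_ONLY maps to required HOST_VISIBLE + HOST_COHERENT, as in the switch above.
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
    return vmaFindMemoryTypeIndex(allocator, UINT32_MAX, &allocCreateInfo, pMemoryTypeIndex);
}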
8835 
8836 VkResult vmaFindMemoryTypeIndexForBufferInfo(
8837  VmaAllocator allocator,
8838  const VkBufferCreateInfo* pBufferCreateInfo,
8839  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8840  uint32_t* pMemoryTypeIndex)
8841 {
8842  VMA_ASSERT(allocator != VK_NULL_HANDLE);
8843  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
8844  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
8845  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
8846 
8847  const VkDevice hDev = allocator->m_hDevice;
8848  VkBuffer hBuffer = VK_NULL_HANDLE;
8849  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
8850  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
8851  if(res == VK_SUCCESS)
8852  {
8853  VkMemoryRequirements memReq = {};
8854  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
8855  hDev, hBuffer, &memReq);
8856 
8857  res = vmaFindMemoryTypeIndex(
8858  allocator,
8859  memReq.memoryTypeBits,
8860  pAllocationCreateInfo,
8861  pMemoryTypeIndex);
8862 
8863  allocator->GetVulkanFunctions().vkDestroyBuffer(
8864  hDev, hBuffer, allocator->GetAllocationCallbacks());
8865  }
8866  return res;
8867 }
8868 
8869 VkResult vmaFindMemoryTypeIndexForImageInfo(
8870  VmaAllocator allocator,
8871  const VkImageCreateInfo* pImageCreateInfo,
8872  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8873  uint32_t* pMemoryTypeIndex)
8874 {
8875  VMA_ASSERT(allocator != VK_NULL_HANDLE);
8876  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
8877  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
8878  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
8879 
8880  const VkDevice hDev = allocator->m_hDevice;
8881  VkImage hImage = VK_NULL_HANDLE;
8882  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
8883  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
8884  if(res == VK_SUCCESS)
8885  {
8886  VkMemoryRequirements memReq = {};
8887  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
8888  hDev, hImage, &memReq);
8889 
8890  res = vmaFindMemoryTypeIndex(
8891  allocator,
8892  memReq.memoryTypeBits,
8893  pAllocationCreateInfo,
8894  pMemoryTypeIndex);
8895 
8896  allocator->GetVulkanFunctions().vkDestroyImage(
8897  hDev, hImage, allocator->GetAllocationCallbacks());
8898  }
8899  return res;
8900 }
8901 
8902 VkResult vmaCreatePool(
8903  VmaAllocator allocator,
8904  const VmaPoolCreateInfo* pCreateInfo,
8905  VmaPool* pPool)
8906 {
8907  VMA_ASSERT(allocator && pCreateInfo && pPool);
8908 
8909  VMA_DEBUG_LOG("vmaCreatePool");
8910 
8911  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8912 
8913  return allocator->CreatePool(pCreateInfo, pPool);
8914 }
8915 
8916 void vmaDestroyPool(
8917  VmaAllocator allocator,
8918  VmaPool pool)
8919 {
8920  VMA_ASSERT(allocator);
8921 
8922  if(pool == VK_NULL_HANDLE)
8923  {
8924  return;
8925  }
8926 
8927  VMA_DEBUG_LOG("vmaDestroyPool");
8928 
8929  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8930 
8931  allocator->DestroyPool(pool);
8932 }
8933 
8934 void vmaGetPoolStats(
8935  VmaAllocator allocator,
8936  VmaPool pool,
8937  VmaPoolStats* pPoolStats)
8938 {
8939  VMA_ASSERT(allocator && pool && pPoolStats);
8940 
8941  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8942 
8943  allocator->GetPoolStats(pool, pPoolStats);
8944 }
8945 
8946 void vmaMakePoolAllocationsLost(
8947  VmaAllocator allocator,
8948  VmaPool pool,
8949  size_t* pLostAllocationCount)
8950 {
8951  VMA_ASSERT(allocator && pool);
8952 
8953  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8954 
8955  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
8956 }
8957 
8958 VkResult vmaAllocateMemory(
8959  VmaAllocator allocator,
8960  const VkMemoryRequirements* pVkMemoryRequirements,
8961  const VmaAllocationCreateInfo* pCreateInfo,
8962  VmaAllocation* pAllocation,
8963  VmaAllocationInfo* pAllocationInfo)
8964 {
8965  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
8966 
8967  VMA_DEBUG_LOG("vmaAllocateMemory");
8968 
8969  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8970 
8971  VkResult result = allocator->AllocateMemory(
8972  *pVkMemoryRequirements,
8973  false, // requiresDedicatedAllocation
8974  false, // prefersDedicatedAllocation
8975  VK_NULL_HANDLE, // dedicatedBuffer
8976  VK_NULL_HANDLE, // dedicatedImage
8977  *pCreateInfo,
8978  VMA_SUBALLOCATION_TYPE_UNKNOWN,
8979  pAllocation);
8980 
8981  if(pAllocationInfo && result == VK_SUCCESS)
8982  {
8983  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8984  }
8985 
8986  return result;
8987 }
8988 
8989 VkResult vmaAllocateMemoryForBuffer(
8990  VmaAllocator allocator,
8991  VkBuffer buffer,
8992  const VmaAllocationCreateInfo* pCreateInfo,
8993  VmaAllocation* pAllocation,
8994  VmaAllocationInfo* pAllocationInfo)
8995 {
8996  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
8997 
8998  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
8999 
9000  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9001 
9002  VkMemoryRequirements vkMemReq = {};
9003  bool requiresDedicatedAllocation = false;
9004  bool prefersDedicatedAllocation = false;
9005  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
9006  requiresDedicatedAllocation,
9007  prefersDedicatedAllocation);
9008 
9009  VkResult result = allocator->AllocateMemory(
9010  vkMemReq,
9011  requiresDedicatedAllocation,
9012  prefersDedicatedAllocation,
9013  buffer, // dedicatedBuffer
9014  VK_NULL_HANDLE, // dedicatedImage
9015  *pCreateInfo,
9016  VMA_SUBALLOCATION_TYPE_BUFFER,
9017  pAllocation);
9018 
9019  if(pAllocationInfo && result == VK_SUCCESS)
9020  {
9021  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
9022  }
9023 
9024  return result;
9025 }
9026 
9027 VkResult vmaAllocateMemoryForImage(
9028  VmaAllocator allocator,
9029  VkImage image,
9030  const VmaAllocationCreateInfo* pCreateInfo,
9031  VmaAllocation* pAllocation,
9032  VmaAllocationInfo* pAllocationInfo)
9033 {
9034  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
9035 
9036  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
9037 
9038  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9039 
9040  VkResult result = AllocateMemoryForImage(
9041  allocator,
9042  image,
9043  pCreateInfo,
9044  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
9045  pAllocation);
9046 
9047  if(pAllocationInfo && result == VK_SUCCESS)
9048  {
9049  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
9050  }
9051 
9052  return result;
9053 }
9054 
9055 void vmaFreeMemory(
9056  VmaAllocator allocator,
9057  VmaAllocation allocation)
9058 {
9059  VMA_ASSERT(allocator && allocation);
9060 
9061  VMA_DEBUG_LOG("vmaFreeMemory");
9062 
9063  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9064 
9065  allocator->FreeMemory(allocation);
9066 }
9067 
9068 void vmaGetAllocationInfo(
9069  VmaAllocator allocator,
9070  VmaAllocation allocation,
9071  VmaAllocationInfo* pAllocationInfo)
9072 {
9073  VMA_ASSERT(allocator && allocation && pAllocationInfo);
9074 
9075  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9076 
9077  allocator->GetAllocationInfo(allocation, pAllocationInfo);
9078 }
9079 
9080 VkBool32 vmaTouchAllocation(
9081  VmaAllocator allocator,
9082  VmaAllocation allocation)
9083 {
9084  VMA_ASSERT(allocator && allocation);
9085 
9086  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9087 
9088  return allocator->TouchAllocation(allocation);
9089 }
9090 
9091 void vmaSetAllocationUserData(
9092  VmaAllocator allocator,
9093  VmaAllocation allocation,
9094  void* pUserData)
9095 {
9096  VMA_ASSERT(allocator && allocation);
9097 
9098  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9099 
9100  allocation->SetUserData(allocator, pUserData);
9101 }
9102 
9103 void vmaCreateLostAllocation(
9104  VmaAllocator allocator,
9105  VmaAllocation* pAllocation)
9106 {
9107  VMA_ASSERT(allocator && pAllocation);
9108 
9109  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9110 
9111  allocator->CreateLostAllocation(pAllocation);
9112 }
9113 
9114 VkResult vmaMapMemory(
9115  VmaAllocator allocator,
9116  VmaAllocation allocation,
9117  void** ppData)
9118 {
9119  VMA_ASSERT(allocator && allocation && ppData);
9120 
9121  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9122 
9123  return allocator->Map(allocation, ppData);
9124 }
9125 
9126 void vmaUnmapMemory(
9127  VmaAllocator allocator,
9128  VmaAllocation allocation)
9129 {
9130  VMA_ASSERT(allocator && allocation);
9131 
9132  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9133 
9134  allocator->Unmap(allocation);
9135 }
9136 
9137 VkResult vmaDefragment(
9138  VmaAllocator allocator,
9139  VmaAllocation* pAllocations,
9140  size_t allocationCount,
9141  VkBool32* pAllocationsChanged,
9142  const VmaDefragmentationInfo *pDefragmentationInfo,
9143  VmaDefragmentationStats* pDefragmentationStats)
9144 {
9145  VMA_ASSERT(allocator && pAllocations);
9146 
9147  VMA_DEBUG_LOG("vmaDefragment");
9148 
9149  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9150 
9151  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
9152 }
9153 
9154 VkResult vmaBindBufferMemory(
9155  VmaAllocator allocator,
9156  VmaAllocation allocation,
9157  VkBuffer buffer)
9158 {
9159  VMA_ASSERT(allocator && allocation && buffer);
9160 
9161  VMA_DEBUG_LOG("vmaBindBufferMemory");
9162 
9163  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9164 
9165  return allocator->BindBufferMemory(allocation, buffer);
9166 }
9167 
9168 VkResult vmaBindImageMemory(
9169  VmaAllocator allocator,
9170  VmaAllocation allocation,
9171  VkImage image)
9172 {
9173  VMA_ASSERT(allocator && allocation && image);
9174 
9175  VMA_DEBUG_LOG("vmaBindImageMemory");
9176 
9177  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9178 
9179  return allocator->BindImageMemory(allocation, image);
9180 }
9181 
9182 VkResult vmaCreateBuffer(
9183  VmaAllocator allocator,
9184  const VkBufferCreateInfo* pBufferCreateInfo,
9185  const VmaAllocationCreateInfo* pAllocationCreateInfo,
9186  VkBuffer* pBuffer,
9187  VmaAllocation* pAllocation,
9188  VmaAllocationInfo* pAllocationInfo)
9189 {
9190  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
9191 
9192  VMA_DEBUG_LOG("vmaCreateBuffer");
9193 
9194  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9195 
9196  *pBuffer = VK_NULL_HANDLE;
9197  *pAllocation = VK_NULL_HANDLE;
9198 
9199  // 1. Create VkBuffer.
9200  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
9201  allocator->m_hDevice,
9202  pBufferCreateInfo,
9203  allocator->GetAllocationCallbacks(),
9204  pBuffer);
9205  if(res >= 0)
9206  {
9207  // 2. vkGetBufferMemoryRequirements.
9208  VkMemoryRequirements vkMemReq = {};
9209  bool requiresDedicatedAllocation = false;
9210  bool prefersDedicatedAllocation = false;
9211  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
9212  requiresDedicatedAllocation, prefersDedicatedAllocation);
9213 
9214  // Make sure alignment requirements for specific buffer usages reported
9215  // in Physical Device Properties are included in alignment reported by memory requirements.
9216  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
9217  {
9218  VMA_ASSERT(vkMemReq.alignment %
9219  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
9220  }
9221  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
9222  {
9223  VMA_ASSERT(vkMemReq.alignment %
9224  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
9225  }
9226  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
9227  {
9228  VMA_ASSERT(vkMemReq.alignment %
9229  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
9230  }
9231 
9232  // 3. Allocate memory using allocator.
9233  res = allocator->AllocateMemory(
9234  vkMemReq,
9235  requiresDedicatedAllocation,
9236  prefersDedicatedAllocation,
9237  *pBuffer, // dedicatedBuffer
9238  VK_NULL_HANDLE, // dedicatedImage
9239  *pAllocationCreateInfo,
9240  VMA_SUBALLOCATION_TYPE_BUFFER,
9241  pAllocation);
9242  if(res >= 0)
9243  {
9244  // 4. Bind buffer with memory.
9245  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
9246  if(res >= 0)
9247  {
9248  // All steps succeeded.
9249  if(pAllocationInfo != VMA_NULL)
9250  {
9251  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
9252  }
9253  return VK_SUCCESS;
9254  }
9255  allocator->FreeMemory(*pAllocation);
9256  *pAllocation = VK_NULL_HANDLE;
9257  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
9258  *pBuffer = VK_NULL_HANDLE;
9259  return res;
9260  }
9261  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
9262  *pBuffer = VK_NULL_HANDLE;
9263  return res;
9264  }
9265  return res;
9266 }
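// Illustrative sketch (not part of the library): the common one-call path through
// vmaCreateBuffer above, creating a GPU-only vertex buffer together with its memory.
// The pair is later released with a single vmaDestroyBuffer(allocator, buffer, allocation).
static VkResult ExampleCreateVertexBuffer(
    VmaAllocator allocator, VkDeviceSize size, VkBuffer* pBuffer, VmaAllocation* pAllocation)
{
    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = size;
    bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    return vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
        pBuffer, pAllocation, VMA_NULL);
}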
9267 
9268 void vmaDestroyBuffer(
9269  VmaAllocator allocator,
9270  VkBuffer buffer,
9271  VmaAllocation allocation)
9272 {
9273  if(buffer != VK_NULL_HANDLE)
9274  {
9275  VMA_ASSERT(allocator);
9276 
9277  VMA_DEBUG_LOG("vmaDestroyBuffer");
9278 
9279  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9280 
9281  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
9282 
9283  allocator->FreeMemory(allocation);
9284  }
9285 }
9286 
9287 VkResult vmaCreateImage(
9288  VmaAllocator allocator,
9289  const VkImageCreateInfo* pImageCreateInfo,
9290  const VmaAllocationCreateInfo* pAllocationCreateInfo,
9291  VkImage* pImage,
9292  VmaAllocation* pAllocation,
9293  VmaAllocationInfo* pAllocationInfo)
9294 {
9295  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
9296 
9297  VMA_DEBUG_LOG("vmaCreateImage");
9298 
9299  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9300 
9301  *pImage = VK_NULL_HANDLE;
9302  *pAllocation = VK_NULL_HANDLE;
9303 
9304  // 1. Create VkImage.
9305  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
9306  allocator->m_hDevice,
9307  pImageCreateInfo,
9308  allocator->GetAllocationCallbacks(),
9309  pImage);
9310  if(res >= 0)
9311  {
9312  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
9313  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
9314  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
9315 
9316  // 2. Allocate memory using allocator.
9317  res = AllocateMemoryForImage(allocator, *pImage, pAllocationCreateInfo, suballocType, pAllocation);
9318  if(res >= 0)
9319  {
9320  // 3. Bind image with memory.
9321  res = allocator->BindImageMemory(*pAllocation, *pImage);
9322  if(res >= 0)
9323  {
9324  // All steps succeeded.
9325  if(pAllocationInfo != VMA_NULL)
9326  {
9327  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
9328  }
9329  return VK_SUCCESS;
9330  }
9331  allocator->FreeMemory(*pAllocation);
9332  *pAllocation = VK_NULL_HANDLE;
9333  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
9334  *pImage = VK_NULL_HANDLE;
9335  return res;
9336  }
9337  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
9338  *pImage = VK_NULL_HANDLE;
9339  return res;
9340  }
9341  return res;
9342 }
9343 
9344 void vmaDestroyImage(
9345  VmaAllocator allocator,
9346  VkImage image,
9347  VmaAllocation allocation)
9348 {
9349  if(image != VK_NULL_HANDLE)
9350  {
9351  VMA_ASSERT(allocator);
9352 
9353  VMA_DEBUG_LOG("vmaDestroyImage");
9354 
9355  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9356 
9357  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
9358 
9359  allocator->FreeMemory(allocation);
9360  }
9361 }
9362 
9363 #endif // #ifdef VMA_IMPLEMENTATION
+Go to the documentation of this file.
1 //
2 // Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29 
1078 #include <vulkan/vulkan.h>
1079 
1089 VK_DEFINE_HANDLE(VmaAllocator)
1090 
1091 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
1093  VmaAllocator allocator,
1094  uint32_t memoryType,
1095  VkDeviceMemory memory,
1096  VkDeviceSize size);
1098 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
1099  VmaAllocator allocator,
1100  uint32_t memoryType,
1101  VkDeviceMemory memory,
1102  VkDeviceSize size);
1103 
1117 
1147 
1150 typedef VkFlags VmaAllocatorCreateFlags;
1151 
1156 typedef struct VmaVulkanFunctions {
1157  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
1158  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
1159  PFN_vkAllocateMemory vkAllocateMemory;
1160  PFN_vkFreeMemory vkFreeMemory;
1161  PFN_vkMapMemory vkMapMemory;
1162  PFN_vkUnmapMemory vkUnmapMemory;
1163  PFN_vkBindBufferMemory vkBindBufferMemory;
1164  PFN_vkBindImageMemory vkBindImageMemory;
1165  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
1166  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
1167  PFN_vkCreateBuffer vkCreateBuffer;
1168  PFN_vkDestroyBuffer vkDestroyBuffer;
1169  PFN_vkCreateImage vkCreateImage;
1170  PFN_vkDestroyImage vkDestroyImage;
1171  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
1172  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
1174 
1177 {
1179  VmaAllocatorCreateFlags flags;
1181 
1182  VkPhysicalDevice physicalDevice;
1184 
1185  VkDevice device;
1187 
1190 
1191  const VkAllocationCallbacks* pAllocationCallbacks;
1193 
1232  const VkDeviceSize* pHeapSizeLimit;
1246 
1248 VkResult vmaCreateAllocator(
1249  const VmaAllocatorCreateInfo* pCreateInfo,
1250  VmaAllocator* pAllocator);
1251 
1253 void vmaDestroyAllocator(
1254  VmaAllocator allocator);
1255 
1261  VmaAllocator allocator,
1262  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
1263 
1269  VmaAllocator allocator,
1270  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
1271 
1279  VmaAllocator allocator,
1280  uint32_t memoryTypeIndex,
1281  VkMemoryPropertyFlags* pFlags);
1282 
1292  VmaAllocator allocator,
1293  uint32_t frameIndex);
1294 
1297 typedef struct VmaStatInfo
1298 {
1300  uint32_t blockCount;
1306  VkDeviceSize usedBytes;
1308  VkDeviceSize unusedBytes;
1309  VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
1310  VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
1311 } VmaStatInfo;
1312 
1314 typedef struct VmaStats
1315 {
1316  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
1317  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
1319 } VmaStats;
1320 
1322 void vmaCalculateStats(
1323  VmaAllocator allocator,
1324  VmaStats* pStats);
1325 
1326 #define VMA_STATS_STRING_ENABLED 1
1327 
1328 #if VMA_STATS_STRING_ENABLED
1329 
1331 
1333 void vmaBuildStatsString(
1334  VmaAllocator allocator,
1335  char** ppStatsString,
1336  VkBool32 detailedMap);
1337 
1338 void vmaFreeStatsString(
1339  VmaAllocator allocator,
1340  char* pStatsString);
1341 
1342 #endif // #if VMA_STATS_STRING_ENABLED
1343 
1352 VK_DEFINE_HANDLE(VmaPool)
1353 
1354 typedef enum VmaMemoryUsage
1355 {
1404 } VmaMemoryUsage;
1405 
1420 
1470 
1474 
1476 {
1478  VmaAllocationCreateFlags flags;
1489  VkMemoryPropertyFlags requiredFlags;
1494  VkMemoryPropertyFlags preferredFlags;
1502  uint32_t memoryTypeBits;
1515  void* pUserData;
1517 
1534 VkResult vmaFindMemoryTypeIndex(
1535  VmaAllocator allocator,
1536  uint32_t memoryTypeBits,
1537  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1538  uint32_t* pMemoryTypeIndex);
1539 
1553  VmaAllocator allocator,
1554  const VkBufferCreateInfo* pBufferCreateInfo,
1555  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1556  uint32_t* pMemoryTypeIndex);
1557 
1571  VmaAllocator allocator,
1572  const VkImageCreateInfo* pImageCreateInfo,
1573  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1574  uint32_t* pMemoryTypeIndex);
1575 
1596 
1599 typedef VkFlags VmaPoolCreateFlags;
1600 
1603 typedef struct VmaPoolCreateInfo {
1609  VmaPoolCreateFlags flags;
1614  VkDeviceSize blockSize;
1643 
1646 typedef struct VmaPoolStats {
1649  VkDeviceSize size;
1652  VkDeviceSize unusedSize;
1665  VkDeviceSize unusedRangeSizeMax;
1666 } VmaPoolStats;
1667 
1674 VkResult vmaCreatePool(
1675  VmaAllocator allocator,
1676  const VmaPoolCreateInfo* pCreateInfo,
1677  VmaPool* pPool);
1678 
1681 void vmaDestroyPool(
1682  VmaAllocator allocator,
1683  VmaPool pool);
1684 
1691 void vmaGetPoolStats(
1692  VmaAllocator allocator,
1693  VmaPool pool,
1694  VmaPoolStats* pPoolStats);
1695 
1703  VmaAllocator allocator,
1704  VmaPool pool,
1705  size_t* pLostAllocationCount);
1706 
1731 VK_DEFINE_HANDLE(VmaAllocation)
1732 
1733 
1735 typedef struct VmaAllocationInfo {
1740  uint32_t memoryType;
1749  VkDeviceMemory deviceMemory;
1754  VkDeviceSize offset;
1759  VkDeviceSize size;
1773  void* pUserData;
1775 
1786 VkResult vmaAllocateMemory(
1787  VmaAllocator allocator,
1788  const VkMemoryRequirements* pVkMemoryRequirements,
1789  const VmaAllocationCreateInfo* pCreateInfo,
1790  VmaAllocation* pAllocation,
1791  VmaAllocationInfo* pAllocationInfo);
1792 
1800  VmaAllocator allocator,
1801  VkBuffer buffer,
1802  const VmaAllocationCreateInfo* pCreateInfo,
1803  VmaAllocation* pAllocation,
1804  VmaAllocationInfo* pAllocationInfo);
1805 
1807 VkResult vmaAllocateMemoryForImage(
1808  VmaAllocator allocator,
1809  VkImage image,
1810  const VmaAllocationCreateInfo* pCreateInfo,
1811  VmaAllocation* pAllocation,
1812  VmaAllocationInfo* pAllocationInfo);
1813 
1815 void vmaFreeMemory(
1816  VmaAllocator allocator,
1817  VmaAllocation allocation);
1818 
1836  VmaAllocator allocator,
1837  VmaAllocation allocation,
1838  VmaAllocationInfo* pAllocationInfo);
1839 
1854 VkBool32 vmaTouchAllocation(
1855  VmaAllocator allocator,
1856  VmaAllocation allocation);
1857 
1872  VmaAllocator allocator,
1873  VmaAllocation allocation,
1874  void* pUserData);
1875 
1887  VmaAllocator allocator,
1888  VmaAllocation* pAllocation);
1889 
1924 VkResult vmaMapMemory(
1925  VmaAllocator allocator,
1926  VmaAllocation allocation,
1927  void** ppData);
1928 
1933 void vmaUnmapMemory(
1934  VmaAllocator allocator,
1935  VmaAllocation allocation);
1936 
1938 typedef struct VmaDefragmentationInfo {
1943  VkDeviceSize maxBytesToMove;
1950 
1952 typedef struct VmaDefragmentationStats {
1954  VkDeviceSize bytesMoved;
1956  VkDeviceSize bytesFreed;
1962 
2045 VkResult vmaDefragment(
2046  VmaAllocator allocator,
2047  VmaAllocation* pAllocations,
2048  size_t allocationCount,
2049  VkBool32* pAllocationsChanged,
2050  const VmaDefragmentationInfo *pDefragmentationInfo,
2051  VmaDefragmentationStats* pDefragmentationStats);
2052 
2065 VkResult vmaBindBufferMemory(
2066  VmaAllocator allocator,
2067  VmaAllocation allocation,
2068  VkBuffer buffer);
2069 
2082 VkResult vmaBindImageMemory(
2083  VmaAllocator allocator,
2084  VmaAllocation allocation,
2085  VkImage image);
2086 
2113 VkResult vmaCreateBuffer(
2114  VmaAllocator allocator,
2115  const VkBufferCreateInfo* pBufferCreateInfo,
2116  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2117  VkBuffer* pBuffer,
2118  VmaAllocation* pAllocation,
2119  VmaAllocationInfo* pAllocationInfo);
2120 
2132 void vmaDestroyBuffer(
2133  VmaAllocator allocator,
2134  VkBuffer buffer,
2135  VmaAllocation allocation);
2136 
2138 VkResult vmaCreateImage(
2139  VmaAllocator allocator,
2140  const VkImageCreateInfo* pImageCreateInfo,
2141  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2142  VkImage* pImage,
2143  VmaAllocation* pAllocation,
2144  VmaAllocationInfo* pAllocationInfo);
2145 
2157 void vmaDestroyImage(
2158  VmaAllocator allocator,
2159  VkImage image,
2160  VmaAllocation allocation);
2161 
2162 #ifdef __cplusplus
2163 }
2164 #endif
2165 
2166 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
2167 
2168 // For Visual Studio IntelliSense.
2169 #ifdef __INTELLISENSE__
2170 #define VMA_IMPLEMENTATION
2171 #endif
2172 
2173 #ifdef VMA_IMPLEMENTATION
2174 #undef VMA_IMPLEMENTATION
2175 
2176 #include <cstdint>
2177 #include <cstdlib>
2178 #include <cstring>
2179 
2180 /*******************************************************************************
2181 CONFIGURATION SECTION
2182 
2183 Define some of these macros before each #include of this header, or change them
2184 here, if you need behavior other than the default for your environment.
2185 */
2186 
2187 /*
2188 Define this macro to 1 to make the library fetch pointers to Vulkan functions
2189 internally, like:
2190 
2191  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
2192 
2193 Define to 0 if you are going to provide your own pointers to Vulkan functions via
2194 VmaAllocatorCreateInfo::pVulkanFunctions.
2195 */
2196 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
2197 #define VMA_STATIC_VULKAN_FUNCTIONS 1
2198 #endif
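
If you define VMA_STATIC_VULKAN_FUNCTIONS to 0, a sketch of providing your own pointers follows (physicalDevice and device are assumed to be valid handles; every member of VmaVulkanFunctions should be assigned the same way):

    VmaVulkanFunctions vulkanFunctions = {};
    vulkanFunctions.vkGetPhysicalDeviceProperties = vkGetPhysicalDeviceProperties;
    vulkanFunctions.vkAllocateMemory = vkAllocateMemory;
    // ... assign the remaining members of VmaVulkanFunctions likewise ...

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;
    allocatorInfo.pVulkanFunctions = &vulkanFunctions;

    VmaAllocator allocator;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);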
2199 
2200 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
2201 //#define VMA_USE_STL_CONTAINERS 1
2202 
2203 /* Set this macro to 1 to make the library include and use STL containers:
2204 std::pair, std::vector, std::list, std::unordered_map.
2205 
2206 Set it to 0 or leave it undefined to make the library use its own implementation
2207 of the containers.
2208 */
2209 #if VMA_USE_STL_CONTAINERS
2210  #define VMA_USE_STL_VECTOR 1
2211  #define VMA_USE_STL_UNORDERED_MAP 1
2212  #define VMA_USE_STL_LIST 1
2213 #endif
2214 
2215 #if VMA_USE_STL_VECTOR
2216  #include <vector>
2217 #endif
2218 
2219 #if VMA_USE_STL_UNORDERED_MAP
2220  #include <unordered_map>
2221 #endif
2222 
2223 #if VMA_USE_STL_LIST
2224  #include <list>
2225 #endif
2226 
2227 /*
2228 The following headers are used only in this CONFIGURATION section, so feel free
2229 to remove them if they are not needed.
2230 */
2231 #include <cassert> // for assert
2232 #include <algorithm> // for min, max
2233 #include <mutex> // for std::mutex
2234 #include <atomic> // for std::atomic
2235 
2236 #if !defined(_WIN32) && !defined(__APPLE__)
2237  #include <malloc.h> // for aligned_alloc()
2238 #endif
2239 
2240 #ifndef VMA_NULL
2241  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
2242  #define VMA_NULL nullptr
2243 #endif
2244 
2245 #if defined(__APPLE__) || defined(__ANDROID__)
2246 #include <cstdlib>
2247 void *aligned_alloc(size_t alignment, size_t size)
2248 {
2249  // alignment must be >= sizeof(void*)
2250  if(alignment < sizeof(void*))
2251  {
2252  alignment = sizeof(void*);
2253  }
2254 
2255  void *pointer;
2256  if(posix_memalign(&pointer, alignment, size) == 0)
2257  return pointer;
2258  return VMA_NULL;
2259 }
2260 #endif
2261 
2262 // Normal assert to check for programmer's errors, especially in Debug configuration.
2263 #ifndef VMA_ASSERT
2264  #ifdef _DEBUG
2265  #define VMA_ASSERT(expr) assert(expr)
2266  #else
2267  #define VMA_ASSERT(expr)
2268  #endif
2269 #endif
2270 
2271 // Assert that will be called very often, like inside data structures e.g. operator[].
2272 // Making it non-empty can make program slow.
2273 #ifndef VMA_HEAVY_ASSERT
2274  #ifdef _DEBUG
2275  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
2276  #else
2277  #define VMA_HEAVY_ASSERT(expr)
2278  #endif
2279 #endif
2280 
2281 #ifndef VMA_ALIGN_OF
2282  #define VMA_ALIGN_OF(type) (__alignof(type))
2283 #endif
2284 
2285 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
2286  #if defined(_WIN32)
2287  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
2288  #else
2289  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
2290  #endif
2291 #endif
2292 
2293 #ifndef VMA_SYSTEM_FREE
2294  #if defined(_WIN32)
2295  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
2296  #else
2297  #define VMA_SYSTEM_FREE(ptr) free(ptr)
2298  #endif
2299 #endif
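
As a sketch, these two macros can be redirected to a custom allocator by defining them before including this header (MyAlignedAlloc and MyFree are hypothetical functions):

    // Hypothetical overrides, defined before the #include of this header:
    #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) MyAlignedAlloc((size), (alignment))
    #define VMA_SYSTEM_FREE(ptr) MyFree(ptr)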
2300 
2301 #ifndef VMA_MIN
2302  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
2303 #endif
2304 
2305 #ifndef VMA_MAX
2306  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
2307 #endif
2308 
2309 #ifndef VMA_SWAP
2310  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
2311 #endif
2312 
2313 #ifndef VMA_SORT
2314  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
2315 #endif
2316 
2317 #ifndef VMA_DEBUG_LOG
2318  #define VMA_DEBUG_LOG(format, ...)
2319  /*
2320  #define VMA_DEBUG_LOG(format, ...) do { \
2321  printf(format, __VA_ARGS__); \
2322  printf("\n"); \
2323  } while(false)
2324  */
2325 #endif
2326 
2327 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
2328 #if VMA_STATS_STRING_ENABLED
2329  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
2330  {
2331  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
2332  }
2333  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
2334  {
2335  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
2336  }
2337  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
2338  {
2339  snprintf(outStr, strLen, "%p", ptr);
2340  }
2341 #endif
2342 
2343 #ifndef VMA_MUTEX
2344  class VmaMutex
2345  {
2346  public:
2347  VmaMutex() { }
2348  ~VmaMutex() { }
2349  void Lock() { m_Mutex.lock(); }
2350  void Unlock() { m_Mutex.unlock(); }
2351  private:
2352  std::mutex m_Mutex;
2353  };
2354  #define VMA_MUTEX VmaMutex
2355 #endif
2356 
2357 /*
2358 If providing your own implementation, you need to implement a subset of std::atomic:
2359 
2360 - Constructor(uint32_t desired)
2361 - uint32_t load() const
2362 - void store(uint32_t desired)
2363 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
2364 */
2365 #ifndef VMA_ATOMIC_UINT32
2366  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
2367 #endif
2368 
2369 #ifndef VMA_BEST_FIT
2370 
2382  #define VMA_BEST_FIT (1)
2383 #endif
2384 
2385 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
2386 
2390  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
2391 #endif
2392 
2393 #ifndef VMA_DEBUG_ALIGNMENT
2394 
2398  #define VMA_DEBUG_ALIGNMENT (1)
2399 #endif
2400 
2401 #ifndef VMA_DEBUG_MARGIN
2402 
2406  #define VMA_DEBUG_MARGIN (0)
2407 #endif
2408 
2409 #ifndef VMA_DEBUG_GLOBAL_MUTEX
2410 
2414  #define VMA_DEBUG_GLOBAL_MUTEX (0)
2415 #endif
2416 
2417 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
2418 
2422  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
2423 #endif
2424 
2425 #ifndef VMA_SMALL_HEAP_MAX_SIZE
2426  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
2428 #endif
2429 
2430 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
2431  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
2433 #endif
2434 
2435 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
2436 
2437 /*******************************************************************************
2438 END OF CONFIGURATION
2439 */
2440 
2441 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
2442  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
2443 
2444 // Returns the number of bits set to 1 in (v): a branchless SWAR popcount.
2445 static inline uint32_t VmaCountBitsSet(uint32_t v)
2446 {
2447  uint32_t c = v - ((v >> 1) & 0x55555555); // Count bits within each 2-bit group.
2448  c = ((c >> 2) & 0x33333333) + (c & 0x33333333); // Sum pairs into 4-bit groups.
2449  c = ((c >> 4) + c) & 0x0F0F0F0F; // Sum nibbles into bytes.
2450  c = ((c >> 8) + c) & 0x00FF00FF; // Sum bytes into 16-bit halves.
2451  c = ((c >> 16) + c) & 0x0000FFFF; // Sum the two halves.
2452  return c;
2453 }
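
For illustration: VmaCountBitsSet(0xB) == 3, since 0xB is binary 1011. A typical use in allocator code is counting candidate memory types in a bitmask (memoryTypeBits here is assumed to come from a filled VkMemoryRequirements):

    uint32_t candidateTypeCount = VmaCountBitsSet(memoryTypeBits);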
2454 
2455 // Aligns the given value up to the nearest multiple of align. For example: VmaAlignUp(11, 8) = 16.
2456 // Use types like uint32_t, uint64_t as T.
2457 template <typename T>
2458 static inline T VmaAlignUp(T val, T align)
2459 {
2460  return (val + align - 1) / align * align;
2461 }
2462 
2463 // Integer division with mathematical rounding to the nearest integer.
2464 template <typename T>
2465 inline T VmaRoundDiv(T x, T y)
2466 {
2467  return (x + (y / (T)2)) / y;
2468 }
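
Worked examples for the two helpers above (cassert is already included in this section):

    assert(VmaAlignUp<uint32_t>(11, 8) == 16); // rounds up to the next multiple of 8
    assert(VmaAlignUp<uint32_t>(16, 8) == 16); // already aligned values are unchanged
    assert(VmaRoundDiv<uint32_t>(7, 2) == 4);  // 3.5 rounds to 4
    assert(VmaRoundDiv<uint32_t>(6, 4) == 2);  // 1.5 rounds to 2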
2469 
2470 #ifndef VMA_SORT
2471 
2472 template<typename Iterator, typename Compare>
2473 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
2474 {
2475  Iterator centerValue = end; --centerValue;
2476  Iterator insertIndex = beg;
2477  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
2478  {
2479  if(cmp(*memTypeIndex, *centerValue))
2480  {
2481  if(insertIndex != memTypeIndex)
2482  {
2483  VMA_SWAP(*memTypeIndex, *insertIndex);
2484  }
2485  ++insertIndex;
2486  }
2487  }
2488  if(insertIndex != centerValue)
2489  {
2490  VMA_SWAP(*insertIndex, *centerValue);
2491  }
2492  return insertIndex;
2493 }
2494 
2495 template<typename Iterator, typename Compare>
2496 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
2497 {
2498  if(beg < end)
2499  {
2500  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
2501  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
2502  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
2503  }
2504 }
2505 
2506 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
2507 
2508 #endif // #ifndef VMA_SORT
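
A sketch of sorting a small array through the VMA_SORT fallback (cmp returns "less than"):

    uint32_t values[] = { 5, 1, 4 };
    VMA_SORT(values, values + 3, [](uint32_t a, uint32_t b) { return a < b; });
    // values is now { 1, 4, 5 }.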
2509 
2510 /*
2511 Returns true if two memory blocks occupy overlapping pages.
2512 ResourceA must be at a lower memory offset than ResourceB.
2513 
2514 The algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)",
2515 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
2516 */
2517 static inline bool VmaBlocksOnSamePage(
2518  VkDeviceSize resourceAOffset,
2519  VkDeviceSize resourceASize,
2520  VkDeviceSize resourceBOffset,
2521  VkDeviceSize pageSize)
2522 {
2523  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
2524  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
2525  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
2526  VkDeviceSize resourceBStart = resourceBOffset;
2527  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
2528  return resourceAEndPage == resourceBStartPage;
2529 }
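
A worked example with a 64 KiB granularity ("page" size), which must be a power of two:

    // Resource A occupies [0, 100), so its last byte lies on page 0.
    assert(!VmaBlocksOnSamePage(0, 100, 65536, 65536)); // B starts on page 1: no overlap.
    assert(VmaBlocksOnSamePage(0, 100, 4096, 65536));   // B at offset 4096 is still on page 0.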
2530 
2531 enum VmaSuballocationType
2532 {
2533  VMA_SUBALLOCATION_TYPE_FREE = 0,
2534  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
2535  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
2536  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
2537  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
2538  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
2539  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
2540 };
2541 
2542 /*
2543 Returns true if the given suballocation types could conflict and must respect
2544 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is a buffer
2545 or linear image and the other is an optimal image. If a type is unknown, behaves
2546 conservatively.
2547 */
2548 static inline bool VmaIsBufferImageGranularityConflict(
2549  VmaSuballocationType suballocType1,
2550  VmaSuballocationType suballocType2)
2551 {
2552  if(suballocType1 > suballocType2)
2553  {
2554  VMA_SWAP(suballocType1, suballocType2);
2555  }
2556 
2557  switch(suballocType1)
2558  {
2559  case VMA_SUBALLOCATION_TYPE_FREE:
2560  return false;
2561  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
2562  return true;
2563  case VMA_SUBALLOCATION_TYPE_BUFFER:
2564  return
2565  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
2566  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
2567  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
2568  return
2569  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
2570  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
2571  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
2572  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
2573  return
2574  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
2575  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
2576  return false;
2577  default:
2578  VMA_ASSERT(0);
2579  return true;
2580  }
2581 }
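
For instance, a buffer placed next to an optimal-tiling image conflicts, while two buffers never do:

    assert(VmaIsBufferImageGranularityConflict(
        VMA_SUBALLOCATION_TYPE_BUFFER, VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)); // true
    assert(!VmaIsBufferImageGranularityConflict(
        VMA_SUBALLOCATION_TYPE_BUFFER, VMA_SUBALLOCATION_TYPE_BUFFER));        // false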
2582 
2583 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
2584 struct VmaMutexLock
2585 {
2586 public:
2587  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
2588  m_pMutex(useMutex ? &mutex : VMA_NULL)
2589  {
2590  if(m_pMutex)
2591  {
2592  m_pMutex->Lock();
2593  }
2594  }
2595 
2596  ~VmaMutexLock()
2597  {
2598  if(m_pMutex)
2599  {
2600  m_pMutex->Unlock();
2601  }
2602  }
2603 
2604 private:
2605  VMA_MUTEX* m_pMutex;
2606 };
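
A sketch of the RAII pattern this class enables; passing useMutex = false turns locking into a no-op:

    VMA_MUTEX mutex;
    {
        VmaMutexLock lock(mutex, true); // locked here
        // ... critical section ...
    } // unlocked when lock goes out of scope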
2607 
2608 #if VMA_DEBUG_GLOBAL_MUTEX
2609  static VMA_MUTEX gDebugGlobalMutex;
2610  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
2611 #else
2612  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
2613 #endif
2614 
2615 // Minimum size of a free suballocation to register it in the free suballocation collection.
2616 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
2617 
2618 /*
2619 Performs binary search and returns an iterator to the first element that is
2620 greater than or equal to (key), according to comparison (cmp).
2621 
2622 Cmp should return true if its first argument is less than its second argument.
2623 
2624 The returned iterator points to the found element if it is present in the
2625 collection, or to the place where a new element with value (key) should be inserted.
2626 */
2627 template <typename IterT, typename KeyT, typename CmpT>
2628 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpT cmp)
2629 {
2630  size_t down = 0, up = (end - beg);
2631  while(down < up)
2632  {
2633  const size_t mid = (down + up) / 2;
2634  if(cmp(*(beg+mid), key))
2635  {
2636  down = mid + 1;
2637  }
2638  else
2639  {
2640  up = mid;
2641  }
2642  }
2643  return beg + down;
2644 }
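
A sketch of a lower-bound search with this helper on a sorted array:

    const uint32_t sorted[] = { 1, 3, 3, 7 };
    const uint32_t* it = VmaBinaryFindFirstNotLess(
        sorted, sorted + 4, 3u,
        [](uint32_t lhs, uint32_t rhs) { return lhs < rhs; });
    // it points at index 1 (the first 3); searching for 4 would yield index 3 (the 7).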
2645 
2647 // Memory allocation
2648 
2649 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
2650 {
2651  if((pAllocationCallbacks != VMA_NULL) &&
2652  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
2653  {
2654  return (*pAllocationCallbacks->pfnAllocation)(
2655  pAllocationCallbacks->pUserData,
2656  size,
2657  alignment,
2658  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2659  }
2660  else
2661  {
2662  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
2663  }
2664 }
2665 
2666 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
2667 {
2668  if((pAllocationCallbacks != VMA_NULL) &&
2669  (pAllocationCallbacks->pfnFree != VMA_NULL))
2670  {
2671  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
2672  }
2673  else
2674  {
2675  VMA_SYSTEM_FREE(ptr);
2676  }
2677 }
2678 
2679 template<typename T>
2680 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
2681 {
2682  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
2683 }
2684 
2685 template<typename T>
2686 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
2687 {
2688  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
2689 }
2690 
2691 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
2692 
2693 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
2694 
2695 template<typename T>
2696 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
2697 {
2698  ptr->~T();
2699  VmaFree(pAllocationCallbacks, ptr);
2700 }
2701 
2702 template<typename T>
2703 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
2704 {
2705  if(ptr != VMA_NULL)
2706  {
2707  for(size_t i = count; i--; )
2708  {
2709  ptr[i].~T();
2710  }
2711  VmaFree(pAllocationCallbacks, ptr);
2712  }
2713 }
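
A sketch of the placement-new pattern these macros implement (MyStruct is a hypothetical type; pCallbacks may be null, in which case the system allocator is used):

    MyStruct* p = vma_new(pCallbacks, MyStruct)(); // placement-new into VmaMalloc'ed storage
    vma_delete(pCallbacks, p);                     // explicit destructor call + VmaFree
    // Array variant; note it value-initializes only the first element,
    // so it is meant for trivially constructible types:
    MyStruct* arr = vma_new_array(pCallbacks, MyStruct, 8);
    vma_delete_array(pCallbacks, arr, 8);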
2714 
2715 // STL-compatible allocator.
2716 template<typename T>
2717 class VmaStlAllocator
2718 {
2719 public:
2720  const VkAllocationCallbacks* const m_pCallbacks;
2721  typedef T value_type;
2722 
2723  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
2724  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
2725 
2726  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
2727  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
2728 
2729  template<typename U>
2730  bool operator==(const VmaStlAllocator<U>& rhs) const
2731  {
2732  return m_pCallbacks == rhs.m_pCallbacks;
2733  }
2734  template<typename U>
2735  bool operator!=(const VmaStlAllocator<U>& rhs) const
2736  {
2737  return m_pCallbacks != rhs.m_pCallbacks;
2738  }
2739 
2740  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
2741 };
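
A sketch of plugging VmaStlAllocator into a standard container (assumes <vector> is included; pCallbacks is an assumed const VkAllocationCallbacks*, possibly null):

    VmaStlAllocator<uint32_t> stlAlloc(pCallbacks);
    std::vector< uint32_t, VmaStlAllocator<uint32_t> > v(stlAlloc);
    v.push_back(42); // element storage goes through VmaMalloc/VmaFree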
2742 
2743 #if VMA_USE_STL_VECTOR
2744 
2745 #define VmaVector std::vector
2746 
2747 template<typename T, typename allocatorT>
2748 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
2749 {
2750  vec.insert(vec.begin() + index, item);
2751 }
2752 
2753 template<typename T, typename allocatorT>
2754 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
2755 {
2756  vec.erase(vec.begin() + index);
2757 }
2758 
2759 #else // #if VMA_USE_STL_VECTOR
2760 
2761 /* Class with an interface compatible with a subset of std::vector.
2762 T must be POD because constructors and destructors are not called and memcpy is
2763 used for these objects. */
2764 template<typename T, typename AllocatorT>
2765 class VmaVector
2766 {
2767 public:
2768  typedef T value_type;
2769 
2770  VmaVector(const AllocatorT& allocator) :
2771  m_Allocator(allocator),
2772  m_pArray(VMA_NULL),
2773  m_Count(0),
2774  m_Capacity(0)
2775  {
2776  }
2777 
2778  VmaVector(size_t count, const AllocatorT& allocator) :
2779  m_Allocator(allocator),
2780  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
2781  m_Count(count),
2782  m_Capacity(count)
2783  {
2784  }
2785 
2786  VmaVector(const VmaVector<T, AllocatorT>& src) :
2787  m_Allocator(src.m_Allocator),
2788  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
2789  m_Count(src.m_Count),
2790  m_Capacity(src.m_Count)
2791  {
2792  if(m_Count != 0)
2793  {
2794  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
2795  }
2796  }
2797 
2798  ~VmaVector()
2799  {
2800  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
2801  }
2802 
2803  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
2804  {
2805  if(&rhs != this)
2806  {
2807  resize(rhs.m_Count);
2808  if(m_Count != 0)
2809  {
2810  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
2811  }
2812  }
2813  return *this;
2814  }
2815 
2816  bool empty() const { return m_Count == 0; }
2817  size_t size() const { return m_Count; }
2818  T* data() { return m_pArray; }
2819  const T* data() const { return m_pArray; }
2820 
2821  T& operator[](size_t index)
2822  {
2823  VMA_HEAVY_ASSERT(index < m_Count);
2824  return m_pArray[index];
2825  }
2826  const T& operator[](size_t index) const
2827  {
2828  VMA_HEAVY_ASSERT(index < m_Count);
2829  return m_pArray[index];
2830  }
2831 
2832  T& front()
2833  {
2834  VMA_HEAVY_ASSERT(m_Count > 0);
2835  return m_pArray[0];
2836  }
2837  const T& front() const
2838  {
2839  VMA_HEAVY_ASSERT(m_Count > 0);
2840  return m_pArray[0];
2841  }
2842  T& back()
2843  {
2844  VMA_HEAVY_ASSERT(m_Count > 0);
2845  return m_pArray[m_Count - 1];
2846  }
2847  const T& back() const
2848  {
2849  VMA_HEAVY_ASSERT(m_Count > 0);
2850  return m_pArray[m_Count - 1];
2851  }
2852 
2853  void reserve(size_t newCapacity, bool freeMemory = false)
2854  {
2855  newCapacity = VMA_MAX(newCapacity, m_Count);
2856 
2857  if((newCapacity < m_Capacity) && !freeMemory)
2858  {
2859  newCapacity = m_Capacity;
2860  }
2861 
2862  if(newCapacity != m_Capacity)
2863  {
2864  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
2865  if(m_Count != 0)
2866  {
2867  memcpy(newArray, m_pArray, m_Count * sizeof(T));
2868  }
2869  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
2870  m_Capacity = newCapacity;
2871  m_pArray = newArray;
2872  }
2873  }
2874 
2875  void resize(size_t newCount, bool freeMemory = false)
2876  {
2877  size_t newCapacity = m_Capacity;
2878  if(newCount > m_Capacity)
2879  {
2880  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
2881  }
2882  else if(freeMemory)
2883  {
2884  newCapacity = newCount;
2885  }
2886 
2887  if(newCapacity != m_Capacity)
2888  {
2889  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
2890  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
2891  if(elementsToCopy != 0)
2892  {
2893  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
2894  }
2895  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
2896  m_Capacity = newCapacity;
2897  m_pArray = newArray;
2898  }
2899 
2900  m_Count = newCount;
2901  }
2902 
2903  void clear(bool freeMemory = false)
2904  {
2905  resize(0, freeMemory);
2906  }
2907 
2908  void insert(size_t index, const T& src)
2909  {
2910  VMA_HEAVY_ASSERT(index <= m_Count);
2911  const size_t oldCount = size();
2912  resize(oldCount + 1);
2913  if(index < oldCount)
2914  {
2915  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
2916  }
2917  m_pArray[index] = src;
2918  }
2919 
2920  void remove(size_t index)
2921  {
2922  VMA_HEAVY_ASSERT(index < m_Count);
2923  const size_t oldCount = size();
2924  if(index < oldCount - 1)
2925  {
2926  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
2927  }
2928  resize(oldCount - 1);
2929  }
2930 
2931  void push_back(const T& src)
2932  {
2933  const size_t newIndex = size();
2934  resize(newIndex + 1);
2935  m_pArray[newIndex] = src;
2936  }
2937 
2938  void pop_back()
2939  {
2940  VMA_HEAVY_ASSERT(m_Count > 0);
2941  resize(size() - 1);
2942  }
2943 
2944  void push_front(const T& src)
2945  {
2946  insert(0, src);
2947  }
2948 
2949  void pop_front()
2950  {
2951  VMA_HEAVY_ASSERT(m_Count > 0);
2952  remove(0);
2953  }
2954 
2955  typedef T* iterator;
2956 
2957  iterator begin() { return m_pArray; }
2958  iterator end() { return m_pArray + m_Count; }
2959 
2960 private:
2961  AllocatorT m_Allocator;
2962  T* m_pArray;
2963  size_t m_Count;
2964  size_t m_Capacity;
2965 };
2966 
2967 template<typename T, typename allocatorT>
2968 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
2969 {
2970  vec.insert(index, item);
2971 }
2972 
2973 template<typename T, typename allocatorT>
2974 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
2975 {
2976  vec.remove(index);
2977 }
2978 
2979 #endif // #if VMA_USE_STL_VECTOR
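
Basic use of the VmaVector fallback (T must be POD; pCallbacks as above):

    VmaStlAllocator<uint32_t> a(pCallbacks);
    VmaVector< uint32_t, VmaStlAllocator<uint32_t> > vec(a);
    vec.push_back(10);
    vec.insert(0, 5); // vec is now { 5, 10 }
    vec.remove(1);    // vec is now { 5 }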
2980 
2981 template<typename CmpLess, typename VectorT>
2982 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
2983 {
2984  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
2985  vector.data(),
2986  vector.data() + vector.size(),
2987  value,
2988  CmpLess()) - vector.data();
2989  VmaVectorInsert(vector, indexToInsert, value);
2990  return indexToInsert;
2991 }
2992 
2993 template<typename CmpLess, typename VectorT>
2994 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
2995 {
2996  CmpLess comparator;
2997  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
2998  vector.begin(),
2999  vector.end(),
3000  value,
3001  comparator);
3002  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
3003  {
3004  size_t indexToRemove = it - vector.begin();
3005  VmaVectorRemove(vector, indexToRemove);
3006  return true;
3007  }
3008  return false;
3009 }
3010 
3011 template<typename CmpLess, typename VectorT>
3012 size_t VmaVectorFindSorted(const VectorT& vector, const typename VectorT::value_type& value)
3013 {
3014  CmpLess comparator;
3015  const typename VectorT::value_type* it = VmaBinaryFindFirstNotLess(
3016  vector.data(),
3017  vector.data() + vector.size(),
3018  value,
3019  comparator);
3020  if(it != vector.data() + vector.size() && !comparator(*it, value) && !comparator(value, *it))
3021  {
3022  return it - vector.data();
3023  }
3024  else
3025  {
3026  return vector.size();
3027  }
3028 }
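
A sketch of maintaining a sorted vector with these helpers (vec as in the sketch above; CmpLessU32 is a hypothetical comparator):

    struct CmpLessU32 { bool operator()(uint32_t a, uint32_t b) const { return a < b; } };
    VmaVectorInsertSorted<CmpLessU32>(vec, 7);                // inserts 7 at its sorted position
    bool removed = VmaVectorRemoveSorted<CmpLessU32>(vec, 7); // true if 7 was found and removed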
3029 
3031 // class VmaPoolAllocator
3032 
3033 /*
3034 Allocator for objects of type T, using a list of arrays (pools) to speed up
3035 allocation. The number of elements that can be allocated is not bounded, because
3036 the allocator can create multiple blocks.
3037 */
3038 template<typename T>
3039 class VmaPoolAllocator
3040 {
3041 public:
3042  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
3043  ~VmaPoolAllocator();
3044  void Clear();
3045  T* Alloc();
3046  void Free(T* ptr);
3047 
3048 private:
3049  union Item
3050  {
3051  uint32_t NextFreeIndex;
3052  T Value;
3053  };
3054 
3055  struct ItemBlock
3056  {
3057  Item* pItems;
3058  uint32_t FirstFreeIndex;
3059  };
3060 
3061  const VkAllocationCallbacks* m_pAllocationCallbacks;
3062  size_t m_ItemsPerBlock;
3063  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
3064 
3065  ItemBlock& CreateNewBlock();
3066 };
3067 
3068 template<typename T>
3069 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
3070  m_pAllocationCallbacks(pAllocationCallbacks),
3071  m_ItemsPerBlock(itemsPerBlock),
3072  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
3073 {
3074  VMA_ASSERT(itemsPerBlock > 0);
3075 }
3076 
3077 template<typename T>
3078 VmaPoolAllocator<T>::~VmaPoolAllocator()
3079 {
3080  Clear();
3081 }
3082 
3083 template<typename T>
3084 void VmaPoolAllocator<T>::Clear()
3085 {
3086  for(size_t i = m_ItemBlocks.size(); i--; )
3087  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
3088  m_ItemBlocks.clear();
3089 }
3090 
3091 template<typename T>
3092 T* VmaPoolAllocator<T>::Alloc()
3093 {
3094  for(size_t i = m_ItemBlocks.size(); i--; )
3095  {
3096  ItemBlock& block = m_ItemBlocks[i];
3097  // This block has some free items: Use first one.
3098  if(block.FirstFreeIndex != UINT32_MAX)
3099  {
3100  Item* const pItem = &block.pItems[block.FirstFreeIndex];
3101  block.FirstFreeIndex = pItem->NextFreeIndex;
3102  return &pItem->Value;
3103  }
3104  }
3105 
3106  // No block has free item: Create new one and use it.
3107  ItemBlock& newBlock = CreateNewBlock();
3108  Item* const pItem = &newBlock.pItems[0];
3109  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
3110  return &pItem->Value;
3111 }
3112 
3113 template<typename T>
3114 void VmaPoolAllocator<T>::Free(T* ptr)
3115 {
3116  // Search all memory blocks to find ptr.
3117  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
3118  {
3119  ItemBlock& block = m_ItemBlocks[i];
3120 
3121  // Casting to union.
3122  Item* pItemPtr;
3123  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
3124 
3125  // Check if pItemPtr is in address range of this block.
3126  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
3127  {
3128  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
3129  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
3130  block.FirstFreeIndex = index;
3131  return;
3132  }
3133  }
3134  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
3135 }
3136 
3137 template<typename T>
3138 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
3139 {
3140  ItemBlock newBlock = {
3141  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
3142 
3143  m_ItemBlocks.push_back(newBlock);
3144 
3145  // Set up a singly-linked list of all free items in this block; pItems is shared with the copy pushed above.
3146  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
3147  newBlock.pItems[i].NextFreeIndex = i + 1;
3148  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
3149  return m_ItemBlocks.back();
3150 }
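
A sketch of allocating from the pool; note that Alloc returns an uninitialized slot (no constructor is run):

    VmaPoolAllocator<uint32_t> pool(pCallbacks, 128); // 128 items per block
    uint32_t* item = pool.Alloc();
    *item = 42;
    pool.Free(item); // pushes the slot back onto its block's free list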
3151 
3153 // class VmaRawList, VmaList
3154 
3155 #if VMA_USE_STL_LIST
3156 
3157 #define VmaList std::list
3158 
3159 #else // #if VMA_USE_STL_LIST
3160 
3161 template<typename T>
3162 struct VmaListItem
3163 {
3164  VmaListItem* pPrev;
3165  VmaListItem* pNext;
3166  T Value;
3167 };
3168 
3169 // Doubly linked list.
3170 template<typename T>
3171 class VmaRawList
3172 {
3173 public:
3174  typedef VmaListItem<T> ItemType;
3175 
3176  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
3177  ~VmaRawList();
3178  void Clear();
3179 
3180  size_t GetCount() const { return m_Count; }
3181  bool IsEmpty() const { return m_Count == 0; }
3182 
3183  ItemType* Front() { return m_pFront; }
3184  const ItemType* Front() const { return m_pFront; }
3185  ItemType* Back() { return m_pBack; }
3186  const ItemType* Back() const { return m_pBack; }
3187 
3188  ItemType* PushBack();
3189  ItemType* PushFront();
3190  ItemType* PushBack(const T& value);
3191  ItemType* PushFront(const T& value);
3192  void PopBack();
3193  void PopFront();
3194 
3195  // pItem can be null, which means PushBack.
3196  ItemType* InsertBefore(ItemType* pItem);
3197  // pItem can be null, which means PushFront.
3198  ItemType* InsertAfter(ItemType* pItem);
3199 
3200  ItemType* InsertBefore(ItemType* pItem, const T& value);
3201  ItemType* InsertAfter(ItemType* pItem, const T& value);
3202 
3203  void Remove(ItemType* pItem);
3204 
3205 private:
3206  const VkAllocationCallbacks* const m_pAllocationCallbacks;
3207  VmaPoolAllocator<ItemType> m_ItemAllocator;
3208  ItemType* m_pFront;
3209  ItemType* m_pBack;
3210  size_t m_Count;
3211 
3212  // Declared but not defined, to block the copy constructor and assignment operator.
3213  VmaRawList(const VmaRawList<T>& src);
3214  VmaRawList<T>& operator=(const VmaRawList<T>& rhs);
3215 };
3216 
3217 template<typename T>
3218 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
3219  m_pAllocationCallbacks(pAllocationCallbacks),
3220  m_ItemAllocator(pAllocationCallbacks, 128),
3221  m_pFront(VMA_NULL),
3222  m_pBack(VMA_NULL),
3223  m_Count(0)
3224 {
3225 }
3226 
3227 template<typename T>
3228 VmaRawList<T>::~VmaRawList()
3229 {
3230  // Intentionally not calling Clear, because that would waste computation
3231  // returning all items to m_ItemAllocator as free.
3232 }
3233 
3234 template<typename T>
3235 void VmaRawList<T>::Clear()
3236 {
3237  if(IsEmpty() == false)
3238  {
3239  ItemType* pItem = m_pBack;
3240  while(pItem != VMA_NULL)
3241  {
3242  ItemType* const pPrevItem = pItem->pPrev;
3243  m_ItemAllocator.Free(pItem);
3244  pItem = pPrevItem;
3245  }
3246  m_pFront = VMA_NULL;
3247  m_pBack = VMA_NULL;
3248  m_Count = 0;
3249  }
3250 }
3251 
3252 template<typename T>
3253 VmaListItem<T>* VmaRawList<T>::PushBack()
3254 {
3255  ItemType* const pNewItem = m_ItemAllocator.Alloc();
3256  pNewItem->pNext = VMA_NULL;
3257  if(IsEmpty())
3258  {
3259  pNewItem->pPrev = VMA_NULL;
3260  m_pFront = pNewItem;
3261  m_pBack = pNewItem;
3262  m_Count = 1;
3263  }
3264  else
3265  {
3266  pNewItem->pPrev = m_pBack;
3267  m_pBack->pNext = pNewItem;
3268  m_pBack = pNewItem;
3269  ++m_Count;
3270  }
3271  return pNewItem;
3272 }
3273 
3274 template<typename T>
3275 VmaListItem<T>* VmaRawList<T>::PushFront()
3276 {
3277  ItemType* const pNewItem = m_ItemAllocator.Alloc();
3278  pNewItem->pPrev = VMA_NULL;
3279  if(IsEmpty())
3280  {
3281  pNewItem->pNext = VMA_NULL;
3282  m_pFront = pNewItem;
3283  m_pBack = pNewItem;
3284  m_Count = 1;
3285  }
3286  else
3287  {
3288  pNewItem->pNext = m_pFront;
3289  m_pFront->pPrev = pNewItem;
3290  m_pFront = pNewItem;
3291  ++m_Count;
3292  }
3293  return pNewItem;
3294 }
3295 
3296 template<typename T>
3297 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
3298 {
3299  ItemType* const pNewItem = PushBack();
3300  pNewItem->Value = value;
3301  return pNewItem;
3302 }
3303 
3304 template<typename T>
3305 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
3306 {
3307  ItemType* const pNewItem = PushFront();
3308  pNewItem->Value = value;
3309  return pNewItem;
3310 }
3311 
3312 template<typename T>
3313 void VmaRawList<T>::PopBack()
3314 {
3315  VMA_HEAVY_ASSERT(m_Count > 0);
3316  ItemType* const pBackItem = m_pBack;
3317  ItemType* const pPrevItem = pBackItem->pPrev;
3318  if(pPrevItem != VMA_NULL)
3319  {
3320  pPrevItem->pNext = VMA_NULL;
3321  }
3322  m_pBack = pPrevItem;
3323  m_ItemAllocator.Free(pBackItem);
3324  --m_Count;
3325 }
3326 
3327 template<typename T>
3328 void VmaRawList<T>::PopFront()
3329 {
3330  VMA_HEAVY_ASSERT(m_Count > 0);
3331  ItemType* const pFrontItem = m_pFront;
3332  ItemType* const pNextItem = pFrontItem->pNext;
3333  if(pNextItem != VMA_NULL)
3334  {
3335  pNextItem->pPrev = VMA_NULL;
3336  }
3337  m_pFront = pNextItem;
3338  m_ItemAllocator.Free(pFrontItem);
3339  --m_Count;
3340 }
3341 
3342 template<typename T>
3343 void VmaRawList<T>::Remove(ItemType* pItem)
3344 {
3345  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
3346  VMA_HEAVY_ASSERT(m_Count > 0);
3347 
3348  if(pItem->pPrev != VMA_NULL)
3349  {
3350  pItem->pPrev->pNext = pItem->pNext;
3351  }
3352  else
3353  {
3354  VMA_HEAVY_ASSERT(m_pFront == pItem);
3355  m_pFront = pItem->pNext;
3356  }
3357 
3358  if(pItem->pNext != VMA_NULL)
3359  {
3360  pItem->pNext->pPrev = pItem->pPrev;
3361  }
3362  else
3363  {
3364  VMA_HEAVY_ASSERT(m_pBack == pItem);
3365  m_pBack = pItem->pPrev;
3366  }
3367 
3368  m_ItemAllocator.Free(pItem);
3369  --m_Count;
3370 }
3371 
3372 template<typename T>
3373 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
3374 {
3375  if(pItem != VMA_NULL)
3376  {
3377  ItemType* const prevItem = pItem->pPrev;
3378  ItemType* const newItem = m_ItemAllocator.Alloc();
3379  newItem->pPrev = prevItem;
3380  newItem->pNext = pItem;
3381  pItem->pPrev = newItem;
3382  if(prevItem != VMA_NULL)
3383  {
3384  prevItem->pNext = newItem;
3385  }
3386  else
3387  {
3388  VMA_HEAVY_ASSERT(m_pFront == pItem);
3389  m_pFront = newItem;
3390  }
3391  ++m_Count;
3392  return newItem;
3393  }
3394  else
3395  return PushBack();
3396 }
3397 
3398 template<typename T>
3399 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
3400 {
3401  if(pItem != VMA_NULL)
3402  {
3403  ItemType* const nextItem = pItem->pNext;
3404  ItemType* const newItem = m_ItemAllocator.Alloc();
3405  newItem->pNext = nextItem;
3406  newItem->pPrev = pItem;
3407  pItem->pNext = newItem;
3408  if(nextItem != VMA_NULL)
3409  {
3410  nextItem->pPrev = newItem;
3411  }
3412  else
3413  {
3414  VMA_HEAVY_ASSERT(m_pBack == pItem);
3415  m_pBack = newItem;
3416  }
3417  ++m_Count;
3418  return newItem;
3419  }
3420  else
3421  return PushFront();
3422 }
3423 
3424 template<typename T>
3425 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
3426 {
3427  ItemType* const newItem = InsertBefore(pItem);
3428  newItem->Value = value;
3429  return newItem;
3430 }
3431 
3432 template<typename T>
3433 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
3434 {
3435  ItemType* const newItem = InsertAfter(pItem);
3436  newItem->Value = value;
3437  return newItem;
3438 }
3439 
3440 template<typename T, typename AllocatorT>
3441 class VmaList
3442 {
3443 public:
3444  class iterator
3445  {
3446  public:
3447  iterator() :
3448  m_pList(VMA_NULL),
3449  m_pItem(VMA_NULL)
3450  {
3451  }
3452 
3453  T& operator*() const
3454  {
3455  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
3456  return m_pItem->Value;
3457  }
3458  T* operator->() const
3459  {
3460  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
3461  return &m_pItem->Value;
3462  }
3463 
3464  iterator& operator++()
3465  {
3466  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
3467  m_pItem = m_pItem->pNext;
3468  return *this;
3469  }
3470  iterator& operator--()
3471  {
3472  if(m_pItem != VMA_NULL)
3473  {
3474  m_pItem = m_pItem->pPrev;
3475  }
3476  else
3477  {
3478  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
3479  m_pItem = m_pList->Back();
3480  }
3481  return *this;
3482  }
3483 
3484  iterator operator++(int)
3485  {
3486  iterator result = *this;
3487  ++*this;
3488  return result;
3489  }
3490  iterator operator--(int)
3491  {
3492  iterator result = *this;
3493  --*this;
3494  return result;
3495  }
3496 
3497  bool operator==(const iterator& rhs) const
3498  {
3499  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
3500  return m_pItem == rhs.m_pItem;
3501  }
3502  bool operator!=(const iterator& rhs) const
3503  {
3504  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
3505  return m_pItem != rhs.m_pItem;
3506  }
3507 
3508  private:
3509  VmaRawList<T>* m_pList;
3510  VmaListItem<T>* m_pItem;
3511 
3512  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
3513  m_pList(pList),
3514  m_pItem(pItem)
3515  {
3516  }
3517 
3518  friend class VmaList<T, AllocatorT>;
3519  };
3520 
3521  class const_iterator
3522  {
3523  public:
3524  const_iterator() :
3525  m_pList(VMA_NULL),
3526  m_pItem(VMA_NULL)
3527  {
3528  }
3529 
3530  const_iterator(const iterator& src) :
3531  m_pList(src.m_pList),
3532  m_pItem(src.m_pItem)
3533  {
3534  }
3535 
3536  const T& operator*() const
3537  {
3538  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
3539  return m_pItem->Value;
3540  }
3541  const T* operator->() const
3542  {
3543  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
3544  return &m_pItem->Value;
3545  }
3546 
3547  const_iterator& operator++()
3548  {
3549  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
3550  m_pItem = m_pItem->pNext;
3551  return *this;
3552  }
3553  const_iterator& operator--()
3554  {
3555  if(m_pItem != VMA_NULL)
3556  {
3557  m_pItem = m_pItem->pPrev;
3558  }
3559  else
3560  {
3561  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
3562  m_pItem = m_pList->Back();
3563  }
3564  return *this;
3565  }
3566 
3567  const_iterator operator++(int)
3568  {
3569  const_iterator result = *this;
3570  ++*this;
3571  return result;
3572  }
3573  const_iterator operator--(int)
3574  {
3575  const_iterator result = *this;
3576  --*this;
3577  return result;
3578  }
3579 
3580  bool operator==(const const_iterator& rhs) const
3581  {
3582  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
3583  return m_pItem == rhs.m_pItem;
3584  }
3585  bool operator!=(const const_iterator& rhs) const
3586  {
3587  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
3588  return m_pItem != rhs.m_pItem;
3589  }
3590 
3591  private:
3592  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
3593  m_pList(pList),
3594  m_pItem(pItem)
3595  {
3596  }
3597 
3598  const VmaRawList<T>* m_pList;
3599  const VmaListItem<T>* m_pItem;
3600 
3601  friend class VmaList<T, AllocatorT>;
3602  };
3603 
3604  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
3605 
3606  bool empty() const { return m_RawList.IsEmpty(); }
3607  size_t size() const { return m_RawList.GetCount(); }
3608 
3609  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
3610  iterator end() { return iterator(&m_RawList, VMA_NULL); }
3611 
3612  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
3613  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
3614 
3615  void clear() { m_RawList.Clear(); }
3616  void push_back(const T& value) { m_RawList.PushBack(value); }
3617  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
3618  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
3619 
3620 private:
3621  VmaRawList<T> m_RawList;
3622 };
3623 
3624 #endif // #if VMA_USE_STL_LIST
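
A sketch of iterating the VmaList fallback, mirroring std::list usage:

    VmaStlAllocator<uint32_t> a(pCallbacks);
    VmaList< uint32_t, VmaStlAllocator<uint32_t> > list(a);
    list.push_back(1);
    list.push_back(2);
    for(VmaList< uint32_t, VmaStlAllocator<uint32_t> >::iterator it = list.begin(); it != list.end(); ++it)
    {
        // *it visits 1, then 2.
    }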
3625 
3627 // class VmaMap
3628 
3629 // Unused in this version.
3630 #if 0
3631 
3632 #if VMA_USE_STL_UNORDERED_MAP
3633 
3634 #define VmaPair std::pair
3635 
3636 #define VMA_MAP_TYPE(KeyT, ValueT) \
3637  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
3638 
3639 #else // #if VMA_USE_STL_UNORDERED_MAP
3640 
3641 template<typename T1, typename T2>
3642 struct VmaPair
3643 {
3644  T1 first;
3645  T2 second;
3646 
3647  VmaPair() : first(), second() { }
3648  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
3649 };
3650 
3651 /* Class compatible with subset of interface of std::unordered_map.
3652 KeyT, ValueT must be POD because they will be stored in VmaVector.
3653 */
3654 template<typename KeyT, typename ValueT>
3655 class VmaMap
3656 {
3657 public:
3658  typedef VmaPair<KeyT, ValueT> PairType;
3659  typedef PairType* iterator;
3660 
3661  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
3662 
3663  iterator begin() { return m_Vector.begin(); }
3664  iterator end() { return m_Vector.end(); }
3665 
3666  void insert(const PairType& pair);
3667  iterator find(const KeyT& key);
3668  void erase(iterator it);
3669 
3670 private:
3671  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
3672 };
3673 
3674 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
3675 
3676 template<typename FirstT, typename SecondT>
3677 struct VmaPairFirstLess
3678 {
3679  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
3680  {
3681  return lhs.first < rhs.first;
3682  }
3683  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
3684  {
3685  return lhs.first < rhsFirst;
3686  }
3687 };
3688 
3689 template<typename KeyT, typename ValueT>
3690 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
3691 {
3692  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
3693  m_Vector.data(),
3694  m_Vector.data() + m_Vector.size(),
3695  pair,
3696  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
3697  VmaVectorInsert(m_Vector, indexToInsert, pair);
3698 }
3699 
3700 template<typename KeyT, typename ValueT>
3701 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
3702 {
3703  PairType* it = VmaBinaryFindFirstNotLess(
3704  m_Vector.data(),
3705  m_Vector.data() + m_Vector.size(),
3706  key,
3707  VmaPairFirstLess<KeyT, ValueT>());
3708  if((it != m_Vector.end()) && (it->first == key))
3709  {
3710  return it;
3711  }
3712  else
3713  {
3714  return m_Vector.end();
3715  }
3716 }
3717 
3718 template<typename KeyT, typename ValueT>
3719 void VmaMap<KeyT, ValueT>::erase(iterator it)
3720 {
3721  VmaVectorRemove(m_Vector, it - m_Vector.begin());
3722 }
3723 
3724 #endif // #if VMA_USE_STL_UNORDERED_MAP
3725 
3726 #endif // #if 0
3727 
3729 
3730 class VmaDeviceMemoryBlock;
3731 
3732 struct VmaAllocation_T
3733 {
3734 private:
3735  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
3736 
3737  enum FLAGS
3738  {
3739  FLAG_USER_DATA_STRING = 0x01,
3740  };
3741 
3742 public:
3743  enum ALLOCATION_TYPE
3744  {
3745  ALLOCATION_TYPE_NONE,
3746  ALLOCATION_TYPE_BLOCK,
3747  ALLOCATION_TYPE_DEDICATED,
3748  };
3749 
3750  VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
3751  m_Alignment(1),
3752  m_Size(0),
3753  m_pUserData(VMA_NULL),
3754  m_LastUseFrameIndex(currentFrameIndex),
3755  m_Type((uint8_t)ALLOCATION_TYPE_NONE),
3756  m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
3757  m_MapCount(0),
3758  m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
3759  {
3760  }
3761 
3762  ~VmaAllocation_T()
3763  {
3764  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
3765 
3766  // Check if owned string was freed.
3767  VMA_ASSERT(m_pUserData == VMA_NULL);
3768  }
3769 
3770  void InitBlockAllocation(
3771  VmaPool hPool,
3772  VmaDeviceMemoryBlock* block,
3773  VkDeviceSize offset,
3774  VkDeviceSize alignment,
3775  VkDeviceSize size,
3776  VmaSuballocationType suballocationType,
3777  bool mapped,
3778  bool canBecomeLost)
3779  {
3780  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
3781  VMA_ASSERT(block != VMA_NULL);
3782  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
3783  m_Alignment = alignment;
3784  m_Size = size;
3785  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
3786  m_SuballocationType = (uint8_t)suballocationType;
3787  m_BlockAllocation.m_hPool = hPool;
3788  m_BlockAllocation.m_Block = block;
3789  m_BlockAllocation.m_Offset = offset;
3790  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
3791  }
3792 
3793  void InitLost()
3794  {
3795  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
3796  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
3797  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
3798  m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
3799  m_BlockAllocation.m_Block = VMA_NULL;
3800  m_BlockAllocation.m_Offset = 0;
3801  m_BlockAllocation.m_CanBecomeLost = true;
3802  }
3803 
3804  void ChangeBlockAllocation(
3805  VmaAllocator hAllocator,
3806  VmaDeviceMemoryBlock* block,
3807  VkDeviceSize offset);
3808 
3809  // pMappedData not null means allocation is created with MAPPED flag.
3810  void InitDedicatedAllocation(
3811  uint32_t memoryTypeIndex,
3812  VkDeviceMemory hMemory,
3813  VmaSuballocationType suballocationType,
3814  void* pMappedData,
3815  VkDeviceSize size)
3816  {
3817  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
3818  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
3819  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
3820  m_Alignment = 0;
3821  m_Size = size;
3822  m_SuballocationType = (uint8_t)suballocationType;
3823  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
3824  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
3825  m_DedicatedAllocation.m_hMemory = hMemory;
3826  m_DedicatedAllocation.m_pMappedData = pMappedData;
3827  }
3828 
3829  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
3830  VkDeviceSize GetAlignment() const { return m_Alignment; }
3831  VkDeviceSize GetSize() const { return m_Size; }
3832  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
3833  void* GetUserData() const { return m_pUserData; }
3834  void SetUserData(VmaAllocator hAllocator, void* pUserData);
3835  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
3836 
3837  VmaDeviceMemoryBlock* GetBlock() const
3838  {
3839  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
3840  return m_BlockAllocation.m_Block;
3841  }
3842  VkDeviceSize GetOffset() const;
3843  VkDeviceMemory GetMemory() const;
3844  uint32_t GetMemoryTypeIndex() const;
3845  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
3846  void* GetMappedData() const;
3847  bool CanBecomeLost() const;
3848  VmaPool GetPool() const;
3849 
3850  uint32_t GetLastUseFrameIndex() const
3851  {
3852  return m_LastUseFrameIndex.load();
3853  }
3854  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
3855  {
3856  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
3857  }
3858  /*
3859  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
3860  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
3861  - Else, returns false.
3862 
3863  If hAllocation is already lost, assert - you should not call it then.
3864  If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
3865  */
3866  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
3867 
3868  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
3869  {
3870  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
3871  outInfo.blockCount = 1;
3872  outInfo.allocationCount = 1;
3873  outInfo.unusedRangeCount = 0;
3874  outInfo.usedBytes = m_Size;
3875  outInfo.unusedBytes = 0;
3876  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
3877  outInfo.unusedRangeSizeMin = UINT64_MAX;
3878  outInfo.unusedRangeSizeMax = 0;
3879  }
3880 
3881  void BlockAllocMap();
3882  void BlockAllocUnmap();
3883  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
3884  void DedicatedAllocUnmap(VmaAllocator hAllocator);
3885 
3886 private:
3887  VkDeviceSize m_Alignment;
3888  VkDeviceSize m_Size;
3889  void* m_pUserData;
3890  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
3891  uint8_t m_Type; // ALLOCATION_TYPE
3892  uint8_t m_SuballocationType; // VmaSuballocationType
3893  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
3894  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
3895  uint8_t m_MapCount;
3896  uint8_t m_Flags; // enum FLAGS
3897 
3898  // Allocation out of VmaDeviceMemoryBlock.
3899  struct BlockAllocation
3900  {
3901  VmaPool m_hPool; // Null if belongs to general memory.
3902  VmaDeviceMemoryBlock* m_Block;
3903  VkDeviceSize m_Offset;
3904  bool m_CanBecomeLost;
3905  };
3906 
3907  // Allocation for an object that has its own private VkDeviceMemory.
3908  struct DedicatedAllocation
3909  {
3910  uint32_t m_MemoryTypeIndex;
3911  VkDeviceMemory m_hMemory;
3912  void* m_pMappedData; // Not null means memory is mapped.
3913  };
3914 
3915  union
3916  {
3917  // Allocation out of VmaDeviceMemoryBlock.
3918  BlockAllocation m_BlockAllocation;
3919  // Allocation for an object that has its own private VkDeviceMemory.
3920  DedicatedAllocation m_DedicatedAllocation;
3921  };
3922 
3923  void FreeUserDataString(VmaAllocator hAllocator);
3924 };
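// Illustrative sketch (not part of the library): how the m_MapCount encoding
// declared above behaves. Bit 0x80 marks an allocation created with
// VMA_ALLOCATION_CREATE_MAPPED_BIT; the low 7 bits count explicit
// vmaMapMemory() references. The values below follow directly from the masks.
/*
uint8_t mapCount = 0x80;                   // persistently mapped, 0 explicit refs
++mapCount;                                // after one vmaMapMemory(): 0x81
bool persistent = (mapCount & 0x80) != 0;  // true
uint8_t explicitRefs = mapCount & 0x7F;    // 1
*/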
3925 
3926 /*
3927 Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
3928 allocated memory block or free.
3929 */
3930 struct VmaSuballocation
3931 {
3932  VkDeviceSize offset;
3933  VkDeviceSize size;
3934  VmaAllocation hAllocation;
3935  VmaSuballocationType type;
3936 };
3937 
3938 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
3939 
3940 // Cost of making one additional allocation lost, expressed as an equivalent number of bytes.
3941 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
3942 
3943 /*
3944 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
3945 
3946 If canMakeOtherLost was false:
3947 - item points to a FREE suballocation.
3948 - itemsToMakeLostCount is 0.
3949 
3950 If canMakeOtherLost was true:
3951 - item points to the first of a sequence of suballocations, which are either FREE
3952  or point to VmaAllocations that can become lost.
3953 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
3954  the requested allocation to succeed.
3955 */
3956 struct VmaAllocationRequest
3957 {
3958  VkDeviceSize offset;
3959  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
3960  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
3961  VmaSuballocationList::iterator item;
3962  size_t itemsToMakeLostCount;
3963 
3964  VkDeviceSize CalcCost() const
3965  {
3966  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
3967  }
3968 };
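// Illustrative sketch (not part of the library): how CalcCost() above weighs
// candidate placements. The numbers are made up for the example.
/*
VmaAllocationRequest req = {};
req.sumItemSize = 512 * 1024;        // 512 KiB of lost-able allocations overlapped
req.itemsToMakeLostCount = 2;        // two allocations would have to become lost
VkDeviceSize cost = req.CalcCost();  // 524288 + 2 * 1048576 = 2621440
// Among candidate places inside a block, the lowest-cost request wins.
*/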
3969 
3970 /*
3971 Data structure used for bookkeeping of allocations and unused ranges of memory
3972 in a single VkDeviceMemory block.
3973 */
3974 class VmaBlockMetadata
3975 {
3976 public:
3977  VmaBlockMetadata(VmaAllocator hAllocator);
3978  ~VmaBlockMetadata();
3979  void Init(VkDeviceSize size);
3980 
3981  // Validates all data structures inside this object. If not valid, returns false.
3982  bool Validate() const;
3983  VkDeviceSize GetSize() const { return m_Size; }
3984  size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
3985  VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
3986  VkDeviceSize GetUnusedRangeSizeMax() const;
3987  // Returns true if this block is empty - contains only a single free suballocation.
3988  bool IsEmpty() const;
3989 
3990  void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
3991  void AddPoolStats(VmaPoolStats& inoutStats) const;
3992 
3993 #if VMA_STATS_STRING_ENABLED
3994  void PrintDetailedMap(class VmaJsonWriter& json) const;
3995 #endif
3996 
3997  // Creates a trivial request for the case when the block is empty.
3998  void CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest);
3999 
4000  // Tries to find a place for suballocation with given parameters inside this block.
4001  // If succeeded, fills pAllocationRequest and returns true.
4002  // If failed, returns false.
4003  bool CreateAllocationRequest(
4004  uint32_t currentFrameIndex,
4005  uint32_t frameInUseCount,
4006  VkDeviceSize bufferImageGranularity,
4007  VkDeviceSize allocSize,
4008  VkDeviceSize allocAlignment,
4009  VmaSuballocationType allocType,
4010  bool canMakeOtherLost,
4011  VmaAllocationRequest* pAllocationRequest);
4012 
4013  bool MakeRequestedAllocationsLost(
4014  uint32_t currentFrameIndex,
4015  uint32_t frameInUseCount,
4016  VmaAllocationRequest* pAllocationRequest);
4017 
4018  uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
4019 
4020  // Makes actual allocation based on request. Request must already be checked and valid.
4021  void Alloc(
4022  const VmaAllocationRequest& request,
4023  VmaSuballocationType type,
4024  VkDeviceSize allocSize,
4025  VmaAllocation hAllocation);
4026 
4027  // Frees suballocation assigned to given memory region.
4028  void Free(const VmaAllocation allocation);
4029  void FreeAtOffset(VkDeviceSize offset);
4030 
4031 private:
4032  VkDeviceSize m_Size;
4033  uint32_t m_FreeCount;
4034  VkDeviceSize m_SumFreeSize;
4035  VmaSuballocationList m_Suballocations;
4036  // Suballocations that are free and have size greater than certain threshold.
4037  // Sorted by size, ascending.
4038  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
4039 
4040  bool ValidateFreeSuballocationList() const;
4041 
4042  // Checks if a suballocation with the given parameters can be placed at the given suballocItem.
4043  // If yes, fills pOffset and the other output parameters and returns true. If no, returns false.
4044  bool CheckAllocation(
4045  uint32_t currentFrameIndex,
4046  uint32_t frameInUseCount,
4047  VkDeviceSize bufferImageGranularity,
4048  VkDeviceSize allocSize,
4049  VkDeviceSize allocAlignment,
4050  VmaSuballocationType allocType,
4051  VmaSuballocationList::const_iterator suballocItem,
4052  bool canMakeOtherLost,
4053  VkDeviceSize* pOffset,
4054  size_t* itemsToMakeLostCount,
4055  VkDeviceSize* pSumFreeSize,
4056  VkDeviceSize* pSumItemSize) const;
4057  // Given a free suballocation, merges it with the following one, which must also be free.
4058  void MergeFreeWithNext(VmaSuballocationList::iterator item);
4059  // Releases given suballocation, making it free.
4060  // Merges it with adjacent free suballocations if applicable.
4061  // Returns iterator to the new free suballocation at this place.
4062  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
4063  // Given a free suballocation, inserts it into the sorted list
4064  // m_FreeSuballocationsBySize if it is large enough to qualify.
4065  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
4066  // Given a free suballocation, removes it from the sorted list
4067  // m_FreeSuballocationsBySize if it was large enough to be registered there.
4068  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
4069 };
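// Illustrative sketch (not part of the library): the intended calling sequence
// on VmaBlockMetadata when suballocating from a block. All parameter values
// here are placeholders supplied by the caller.
/*
VmaAllocationRequest request;
if(metadata.CreateAllocationRequest(
    currentFrameIndex, frameInUseCount, bufferImageGranularity,
    allocSize, allocAlignment, allocType,
    false, // canMakeOtherLost
    &request))
{
    // With canMakeOtherLost == true, MakeRequestedAllocationsLost() must succeed first.
    metadata.Alloc(request, allocType, allocSize, hAllocation);
}
*/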
4070 
4071 /*
4072 Represents a single block of device memory (`VkDeviceMemory`) with all the
4073 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
4074 
4075 Thread-safety: This class must be externally synchronized.
4076 */
4077 class VmaDeviceMemoryBlock
4078 {
4079 public:
4080  VmaBlockMetadata m_Metadata;
4081 
4082  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
4083 
4084  ~VmaDeviceMemoryBlock()
4085  {
4086  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
4087  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
4088  }
4089 
4090  // Always call after construction.
4091  void Init(
4092  uint32_t newMemoryTypeIndex,
4093  VkDeviceMemory newMemory,
4094  VkDeviceSize newSize);
4095  // Always call before destruction.
4096  void Destroy(VmaAllocator allocator);
4097 
4098  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
4099  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
4100  void* GetMappedData() const { return m_pMappedData; }
4101 
4102  // Validates all data structures inside this object. If not valid, returns false.
4103  bool Validate() const;
4104 
4105  // ppData can be null.
4106  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
4107  void Unmap(VmaAllocator hAllocator, uint32_t count);
4108 
4109  VkResult BindBufferMemory(
4110  const VmaAllocator hAllocator,
4111  const VmaAllocation hAllocation,
4112  VkBuffer hBuffer);
4113  VkResult BindImageMemory(
4114  const VmaAllocator hAllocator,
4115  const VmaAllocation hAllocation,
4116  VkImage hImage);
4117 
4118 private:
4119  uint32_t m_MemoryTypeIndex;
4120  VkDeviceMemory m_hMemory;
4121 
4122  // Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
4123  // Also protects m_MapCount, m_pMappedData.
4124  VMA_MUTEX m_Mutex;
4125  uint32_t m_MapCount;
4126  void* m_pMappedData;
4127 };
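// Illustrative sketch (not part of the library): Map()/Unmap() above are
// reference-counted behind m_Mutex, so overlapping mappings of one block are
// legal as long as the counts balance before Destroy().
/*
void* pData = VMA_NULL;
block.Map(hAllocator, 1, &pData);  // count 0 -> 1: calls vkMapMemory
block.Map(hAllocator, 1, &pData);  // count 1 -> 2: reuses existing mapping
block.Unmap(hAllocator, 2);        // count 2 -> 0: calls vkUnmapMemory
*/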
4128 
4129 struct VmaPointerLess
4130 {
4131  bool operator()(const void* lhs, const void* rhs) const
4132  {
4133  return lhs < rhs;
4134  }
4135 };
4136 
4137 class VmaDefragmentator;
4138 
4139 /*
4140 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
4141 Vulkan memory type.
4142 
4143 Synchronized internally with a mutex.
4144 */
4145 struct VmaBlockVector
4146 {
4147  VmaBlockVector(
4148  VmaAllocator hAllocator,
4149  uint32_t memoryTypeIndex,
4150  VkDeviceSize preferredBlockSize,
4151  size_t minBlockCount,
4152  size_t maxBlockCount,
4153  VkDeviceSize bufferImageGranularity,
4154  uint32_t frameInUseCount,
4155  bool isCustomPool);
4156  ~VmaBlockVector();
4157 
4158  VkResult CreateMinBlocks();
4159 
4160  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
4161  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
4162  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
4163  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
4164 
4165  void GetPoolStats(VmaPoolStats* pStats);
4166 
4167  bool IsEmpty() const { return m_Blocks.empty(); }
4168 
4169  VkResult Allocate(
4170  VmaPool hCurrentPool,
4171  uint32_t currentFrameIndex,
4172  const VkMemoryRequirements& vkMemReq,
4173  const VmaAllocationCreateInfo& createInfo,
4174  VmaSuballocationType suballocType,
4175  VmaAllocation* pAllocation);
4176 
4177  void Free(
4178  VmaAllocation hAllocation);
4179 
4180  // Adds statistics of this BlockVector to pStats.
4181  void AddStats(VmaStats* pStats);
4182 
4183 #if VMA_STATS_STRING_ENABLED
4184  void PrintDetailedMap(class VmaJsonWriter& json);
4185 #endif
4186 
4187  void MakePoolAllocationsLost(
4188  uint32_t currentFrameIndex,
4189  size_t* pLostAllocationCount);
4190 
4191  VmaDefragmentator* EnsureDefragmentator(
4192  VmaAllocator hAllocator,
4193  uint32_t currentFrameIndex);
4194 
4195  VkResult Defragment(
4196  VmaDefragmentationStats* pDefragmentationStats,
4197  VkDeviceSize& maxBytesToMove,
4198  uint32_t& maxAllocationsToMove);
4199 
4200  void DestroyDefragmentator();
4201 
4202 private:
4203  friend class VmaDefragmentator;
4204 
4205  const VmaAllocator m_hAllocator;
4206  const uint32_t m_MemoryTypeIndex;
4207  const VkDeviceSize m_PreferredBlockSize;
4208  const size_t m_MinBlockCount;
4209  const size_t m_MaxBlockCount;
4210  const VkDeviceSize m_BufferImageGranularity;
4211  const uint32_t m_FrameInUseCount;
4212  const bool m_IsCustomPool;
4213  VMA_MUTEX m_Mutex;
4214  // Incrementally sorted by sumFreeSize, ascending.
4215  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
4216  /* There can be at most one block that is completely empty - a
4217  hysteresis to avoid the pessimistic case of alternating creation and
4218  destruction of a VkDeviceMemory block. */
4219  bool m_HasEmptyBlock;
4220  VmaDefragmentator* m_pDefragmentator;
4221 
4222  size_t CalcMaxBlockSize() const;
4223 
4224  // Finds and removes given block from vector.
4225  void Remove(VmaDeviceMemoryBlock* pBlock);
4226 
4227  // Performs single step in sorting m_Blocks. They may not be fully sorted
4228  // after this call.
4229  void IncrementallySortBlocks();
4230 
4231  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
4232 };
4233 
4234 struct VmaPool_T
4235 {
4236 public:
4237  VmaBlockVector m_BlockVector;
4238 
4239  // Takes ownership.
4240  VmaPool_T(
4241  VmaAllocator hAllocator,
4242  const VmaPoolCreateInfo& createInfo);
4243  ~VmaPool_T();
4244 
4245  VmaBlockVector& GetBlockVector() { return m_BlockVector; }
4246 
4247 #if VMA_STATS_STRING_ENABLED
4248  //void PrintDetailedMap(class VmaStringBuilder& sb);
4249 #endif
4250 };
4251 
4252 class VmaDefragmentator
4253 {
4254  const VmaAllocator m_hAllocator;
4255  VmaBlockVector* const m_pBlockVector;
4256  uint32_t m_CurrentFrameIndex;
4257  VkDeviceSize m_BytesMoved;
4258  uint32_t m_AllocationsMoved;
4259 
4260  struct AllocationInfo
4261  {
4262  VmaAllocation m_hAllocation;
4263  VkBool32* m_pChanged;
4264 
4265  AllocationInfo() :
4266  m_hAllocation(VK_NULL_HANDLE),
4267  m_pChanged(VMA_NULL)
4268  {
4269  }
4270  };
4271 
4272  struct AllocationInfoSizeGreater
4273  {
4274  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
4275  {
4276  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
4277  }
4278  };
4279 
4280  // Used between AddAllocation and Defragment.
4281  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
4282 
4283  struct BlockInfo
4284  {
4285  VmaDeviceMemoryBlock* m_pBlock;
4286  bool m_HasNonMovableAllocations;
4287  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
4288 
4289  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
4290  m_pBlock(VMA_NULL),
4291  m_HasNonMovableAllocations(true),
4292  m_Allocations(pAllocationCallbacks),
4293  m_pMappedDataForDefragmentation(VMA_NULL)
4294  {
4295  }
4296 
4297  void CalcHasNonMovableAllocations()
4298  {
4299  const size_t blockAllocCount = m_pBlock->m_Metadata.GetAllocationCount();
4300  const size_t defragmentAllocCount = m_Allocations.size();
4301  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
4302  }
4303 
4304  void SortAllocationsBySizeDescecnding()
4305  {
4306  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
4307  }
4308 
4309  VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
4310  void Unmap(VmaAllocator hAllocator);
4311 
4312  private:
4313  // Not null if mapped for defragmentation only, not originally mapped.
4314  void* m_pMappedDataForDefragmentation;
4315  };
4316 
4317  struct BlockPointerLess
4318  {
4319  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
4320  {
4321  return pLhsBlockInfo->m_pBlock < pRhsBlock;
4322  }
4323  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
4324  {
4325  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
4326  }
4327  };
4328 
4329  // 1. Blocks with some non-movable allocations go first.
4330  // 2. Blocks with smaller sumFreeSize go first.
4331  struct BlockInfoCompareMoveDestination
4332  {
4333  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
4334  {
4335  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
4336  {
4337  return true;
4338  }
4339  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
4340  {
4341  return false;
4342  }
4343  if(pLhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize())
4344  {
4345  return true;
4346  }
4347  return false;
4348  }
4349  };
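// Illustrative sketch (not part of the library): the resulting order for two
// candidate destination blocks. A block holding non-movable allocations sorts
// first, and among otherwise equal blocks the one with less free space wins,
// so defragmentation fills the most constrained blocks first.
/*
BlockInfoCompareMoveDestination cmp;
// a: has non-movable allocations, b: fully movable => cmp(&a, &b) == true,
// meaning a is preferred as a move destination over b.
*/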
4350 
4351  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
4352  BlockInfoVector m_Blocks;
4353 
4354  VkResult DefragmentRound(
4355  VkDeviceSize maxBytesToMove,
4356  uint32_t maxAllocationsToMove);
4357 
4358  static bool MoveMakesSense(
4359  size_t dstBlockIndex, VkDeviceSize dstOffset,
4360  size_t srcBlockIndex, VkDeviceSize srcOffset);
4361 
4362 public:
4363  VmaDefragmentator(
4364  VmaAllocator hAllocator,
4365  VmaBlockVector* pBlockVector,
4366  uint32_t currentFrameIndex);
4367 
4368  ~VmaDefragmentator();
4369 
4370  VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
4371  uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
4372 
4373  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
4374 
4375  VkResult Defragment(
4376  VkDeviceSize maxBytesToMove,
4377  uint32_t maxAllocationsToMove);
4378 };
4379 
4380 // Main allocator object.
4381 struct VmaAllocator_T
4382 {
4383  bool m_UseMutex;
4384  bool m_UseKhrDedicatedAllocation;
4385  VkDevice m_hDevice;
4386  bool m_AllocationCallbacksSpecified;
4387  VkAllocationCallbacks m_AllocationCallbacks;
4388  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
4389 
4390  // Number of bytes of the heap size limit that are still free, or VK_WHOLE_SIZE if there is no limit for that heap.
4391  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
4392  VMA_MUTEX m_HeapSizeLimitMutex;
4393 
4394  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
4395  VkPhysicalDeviceMemoryProperties m_MemProps;
4396 
4397  // Default pools.
4398  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
4399 
4400  // Each vector is sorted by memory (handle value).
4401  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
4402  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
4403  VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
4404 
4405  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
4406  ~VmaAllocator_T();
4407 
4408  const VkAllocationCallbacks* GetAllocationCallbacks() const
4409  {
4410  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
4411  }
4412  const VmaVulkanFunctions& GetVulkanFunctions() const
4413  {
4414  return m_VulkanFunctions;
4415  }
4416 
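 // Returns the device's bufferImageGranularity limit, raised to
 // VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY if that debug macro is set higher.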
4417  VkDeviceSize GetBufferImageGranularity() const
4418  {
4419  return VMA_MAX(
4420  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
4421  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
4422  }
4423 
4424  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
4425  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
4426 
4427  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
4428  {
4429  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
4430  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
4431  }
4432 
4433  void GetBufferMemoryRequirements(
4434  VkBuffer hBuffer,
4435  VkMemoryRequirements& memReq,
4436  bool& requiresDedicatedAllocation,
4437  bool& prefersDedicatedAllocation) const;
4438  void GetImageMemoryRequirements(
4439  VkImage hImage,
4440  VkMemoryRequirements& memReq,
4441  bool& requiresDedicatedAllocation,
4442  bool& prefersDedicatedAllocation) const;
4443 
4444  // Main allocation function.
4445  VkResult AllocateMemory(
4446  const VkMemoryRequirements& vkMemReq,
4447  bool requiresDedicatedAllocation,
4448  bool prefersDedicatedAllocation,
4449  VkBuffer dedicatedBuffer,
4450  VkImage dedicatedImage,
4451  const VmaAllocationCreateInfo& createInfo,
4452  VmaSuballocationType suballocType,
4453  VmaAllocation* pAllocation);
4454 
4455  // Main deallocation function.
4456  void FreeMemory(const VmaAllocation allocation);
4457 
4458  void CalculateStats(VmaStats* pStats);
4459 
4460 #if VMA_STATS_STRING_ENABLED
4461  void PrintDetailedMap(class VmaJsonWriter& json);
4462 #endif
4463 
4464  VkResult Defragment(
4465  VmaAllocation* pAllocations,
4466  size_t allocationCount,
4467  VkBool32* pAllocationsChanged,
4468  const VmaDefragmentationInfo* pDefragmentationInfo,
4469  VmaDefragmentationStats* pDefragmentationStats);
4470 
4471  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
4472  bool TouchAllocation(VmaAllocation hAllocation);
4473 
4474  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
4475  void DestroyPool(VmaPool pool);
4476  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
4477 
4478  void SetCurrentFrameIndex(uint32_t frameIndex);
4479 
4480  void MakePoolAllocationsLost(
4481  VmaPool hPool,
4482  size_t* pLostAllocationCount);
4483 
4484  void CreateLostAllocation(VmaAllocation* pAllocation);
4485 
4486  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
4487  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
4488 
4489  VkResult Map(VmaAllocation hAllocation, void** ppData);
4490  void Unmap(VmaAllocation hAllocation);
4491 
4492  VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
4493  VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);
4494 
4495 private:
4496  VkDeviceSize m_PreferredLargeHeapBlockSize;
4497 
4498  VkPhysicalDevice m_PhysicalDevice;
4499  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
4500 
4501  VMA_MUTEX m_PoolsMutex;
4502  // Protected by m_PoolsMutex. Sorted by pointer value.
4503  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
4504 
4505  VmaVulkanFunctions m_VulkanFunctions;
4506 
4507  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
4508 
4509  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
4510 
4511  VkResult AllocateMemoryOfType(
4512  const VkMemoryRequirements& vkMemReq,
4513  bool dedicatedAllocation,
4514  VkBuffer dedicatedBuffer,
4515  VkImage dedicatedImage,
4516  const VmaAllocationCreateInfo& createInfo,
4517  uint32_t memTypeIndex,
4518  VmaSuballocationType suballocType,
4519  VmaAllocation* pAllocation);
4520 
4521  // Allocates and registers new VkDeviceMemory specifically for a single allocation.
4522  VkResult AllocateDedicatedMemory(
4523  VkDeviceSize size,
4524  VmaSuballocationType suballocType,
4525  uint32_t memTypeIndex,
4526  bool map,
4527  bool isUserDataString,
4528  void* pUserData,
4529  VkBuffer dedicatedBuffer,
4530  VkImage dedicatedImage,
4531  VmaAllocation* pAllocation);
4532 
4533  // Frees dedicated memory represented by given allocation: unregisters it and releases its VkDeviceMemory.
4534  void FreeDedicatedMemory(VmaAllocation allocation);
4535 };
4536 
4538 // Memory allocation #2 after VmaAllocator_T definition
4539 
4540 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
4541 {
4542  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
4543 }
4544 
4545 static void VmaFree(VmaAllocator hAllocator, void* ptr)
4546 {
4547  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
4548 }
4549 
4550 template<typename T>
4551 static T* VmaAllocate(VmaAllocator hAllocator)
4552 {
4553  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
4554 }
4555 
4556 template<typename T>
4557 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
4558 {
4559  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
4560 }
4561 
4562 template<typename T>
4563 static void vma_delete(VmaAllocator hAllocator, T* ptr)
4564 {
4565  if(ptr != VMA_NULL)
4566  {
4567  ptr->~T();
4568  VmaFree(hAllocator, ptr);
4569  }
4570 }
4571 
4572 template<typename T>
4573 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
4574 {
4575  if(ptr != VMA_NULL)
4576  {
4577  for(size_t i = count; i--; )
4578  ptr[i].~T();
4579  VmaFree(hAllocator, ptr);
4580  }
4581 }
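// Illustrative sketch (not part of the library, MyType is a placeholder):
// vma_delete_array above runs destructors in reverse order of construction
// before releasing the storage, mirroring delete[] for memory that came from
// VmaAllocateArray.
/*
MyType* arr = VmaAllocateArray<MyType>(hAllocator, 16); // raw storage only
// ... placement-new each element, use them ...
vma_delete_array(hAllocator, arr, 16); // ~MyType() on [15]..[0], then VmaFree
*/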
4582 
4584 // VmaStringBuilder
4585 
4586 #if VMA_STATS_STRING_ENABLED
4587 
4588 class VmaStringBuilder
4589 {
4590 public:
4591  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
4592  size_t GetLength() const { return m_Data.size(); }
4593  const char* GetData() const { return m_Data.data(); }
4594 
4595  void Add(char ch) { m_Data.push_back(ch); }
4596  void Add(const char* pStr);
4597  void AddNewLine() { Add('\n'); }
4598  void AddNumber(uint32_t num);
4599  void AddNumber(uint64_t num);
4600  void AddPointer(const void* ptr);
4601 
4602 private:
4603  VmaVector< char, VmaStlAllocator<char> > m_Data;
4604 };
4605 
4606 void VmaStringBuilder::Add(const char* pStr)
4607 {
4608  const size_t strLen = strlen(pStr);
4609  if(strLen > 0)
4610  {
4611  const size_t oldCount = m_Data.size();
4612  m_Data.resize(oldCount + strLen);
4613  memcpy(m_Data.data() + oldCount, pStr, strLen);
4614  }
4615 }
4616 
4617 void VmaStringBuilder::AddNumber(uint32_t num)
4618 {
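 // UINT32_MAX has 10 decimal digits; +1 for the terminating null.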
4619  char buf[11];
4620  VmaUint32ToStr(buf, sizeof(buf), num);
4621  Add(buf);
4622 }
4623 
4624 void VmaStringBuilder::AddNumber(uint64_t num)
4625 {
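 // UINT64_MAX has 20 decimal digits; +1 for the terminating null.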
4626  char buf[21];
4627  VmaUint64ToStr(buf, sizeof(buf), num);
4628  Add(buf);
4629 }
4630 
4631 void VmaStringBuilder::AddPointer(const void* ptr)
4632 {
4633  char buf[21];
4634  VmaPtrToStr(buf, sizeof(buf), ptr);
4635  Add(buf);
4636 }
4637 
4638 #endif // #if VMA_STATS_STRING_ENABLED
4639 
4641 // VmaJsonWriter
4642 
4643 #if VMA_STATS_STRING_ENABLED
4644 
4645 class VmaJsonWriter
4646 {
4647 public:
4648  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
4649  ~VmaJsonWriter();
4650 
4651  void BeginObject(bool singleLine = false);
4652  void EndObject();
4653 
4654  void BeginArray(bool singleLine = false);
4655  void EndArray();
4656 
4657  void WriteString(const char* pStr);
4658  void BeginString(const char* pStr = VMA_NULL);
4659  void ContinueString(const char* pStr);
4660  void ContinueString(uint32_t n);
4661  void ContinueString(uint64_t n);
4662  void ContinueString_Pointer(const void* ptr);
4663  void EndString(const char* pStr = VMA_NULL);
4664 
4665  void WriteNumber(uint32_t n);
4666  void WriteNumber(uint64_t n);
4667  void WriteBool(bool b);
4668  void WriteNull();
4669 
4670 private:
4671  static const char* const INDENT;
4672 
4673  enum COLLECTION_TYPE
4674  {
4675  COLLECTION_TYPE_OBJECT,
4676  COLLECTION_TYPE_ARRAY,
4677  };
4678  struct StackItem
4679  {
4680  COLLECTION_TYPE type;
4681  uint32_t valueCount;
4682  bool singleLineMode;
4683  };
4684 
4685  VmaStringBuilder& m_SB;
4686  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
4687  bool m_InsideString;
4688 
4689  void BeginValue(bool isString);
4690  void WriteIndent(bool oneLess = false);
4691 };
4692 
4693 const char* const VmaJsonWriter::INDENT = " ";
4694 
4695 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
4696  m_SB(sb),
4697  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
4698  m_InsideString(false)
4699 {
4700 }
4701 
4702 VmaJsonWriter::~VmaJsonWriter()
4703 {
4704  VMA_ASSERT(!m_InsideString);
4705  VMA_ASSERT(m_Stack.empty());
4706 }
4707 
4708 void VmaJsonWriter::BeginObject(bool singleLine)
4709 {
4710  VMA_ASSERT(!m_InsideString);
4711 
4712  BeginValue(false);
4713  m_SB.Add('{');
4714 
4715  StackItem item;
4716  item.type = COLLECTION_TYPE_OBJECT;
4717  item.valueCount = 0;
4718  item.singleLineMode = singleLine;
4719  m_Stack.push_back(item);
4720 }
4721 
4722 void VmaJsonWriter::EndObject()
4723 {
4724  VMA_ASSERT(!m_InsideString);
4725 
4726  WriteIndent(true);
4727  m_SB.Add('}');
4728 
4729  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
4730  m_Stack.pop_back();
4731 }
4732 
4733 void VmaJsonWriter::BeginArray(bool singleLine)
4734 {
4735  VMA_ASSERT(!m_InsideString);
4736 
4737  BeginValue(false);
4738  m_SB.Add('[');
4739 
4740  StackItem item;
4741  item.type = COLLECTION_TYPE_ARRAY;
4742  item.valueCount = 0;
4743  item.singleLineMode = singleLine;
4744  m_Stack.push_back(item);
4745 }
4746 
4747 void VmaJsonWriter::EndArray()
4748 {
4749  VMA_ASSERT(!m_InsideString);
4750 
4751  WriteIndent(true);
4752  m_SB.Add(']');
4753 
4754  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
4755  m_Stack.pop_back();
4756 }
4757 
4758 void VmaJsonWriter::WriteString(const char* pStr)
4759 {
4760  BeginString(pStr);
4761  EndString();
4762 }
4763 
4764 void VmaJsonWriter::BeginString(const char* pStr)
4765 {
4766  VMA_ASSERT(!m_InsideString);
4767 
4768  BeginValue(true);
4769  m_SB.Add('"');
4770  m_InsideString = true;
4771  if(pStr != VMA_NULL && pStr[0] != '\0')
4772  {
4773  ContinueString(pStr);
4774  }
4775 }
4776 
4777 void VmaJsonWriter::ContinueString(const char* pStr)
4778 {
4779  VMA_ASSERT(m_InsideString);
4780 
4781  const size_t strLen = strlen(pStr);
4782  for(size_t i = 0; i < strLen; ++i)
4783  {
4784  char ch = pStr[i];
4785  if(ch == '\\')
4786  {
4787  m_SB.Add("\\\\");
4788  }
4789  else if(ch == '"')
4790  {
4791  m_SB.Add("\\\"");
4792  }
4793  else if(ch >= 32)
4794  {
4795  m_SB.Add(ch);
4796  }
4797  else switch(ch)
4798  {
4799  case '\b':
4800  m_SB.Add("\\b");
4801  break;
4802  case '\f':
4803  m_SB.Add("\\f");
4804  break;
4805  case '\n':
4806  m_SB.Add("\\n");
4807  break;
4808  case '\r':
4809  m_SB.Add("\\r");
4810  break;
4811  case '\t':
4812  m_SB.Add("\\t");
4813  break;
4814  default:
4815  VMA_ASSERT(0 && "Character not currently supported.");
4816  break;
4817  }
4818  }
4819 }
4820 
4821 void VmaJsonWriter::ContinueString(uint32_t n)
4822 {
4823  VMA_ASSERT(m_InsideString);
4824  m_SB.AddNumber(n);
4825 }
4826 
4827 void VmaJsonWriter::ContinueString(uint64_t n)
4828 {
4829  VMA_ASSERT(m_InsideString);
4830  m_SB.AddNumber(n);
4831 }
4832 
4833 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
4834 {
4835  VMA_ASSERT(m_InsideString);
4836  m_SB.AddPointer(ptr);
4837 }
4838 
4839 void VmaJsonWriter::EndString(const char* pStr)
4840 {
4841  VMA_ASSERT(m_InsideString);
4842  if(pStr != VMA_NULL && pStr[0] != '\0')
4843  {
4844  ContinueString(pStr);
4845  }
4846  m_SB.Add('"');
4847  m_InsideString = false;
4848 }
4849 
4850 void VmaJsonWriter::WriteNumber(uint32_t n)
4851 {
4852  VMA_ASSERT(!m_InsideString);
4853  BeginValue(false);
4854  m_SB.AddNumber(n);
4855 }
4856 
4857 void VmaJsonWriter::WriteNumber(uint64_t n)
4858 {
4859  VMA_ASSERT(!m_InsideString);
4860  BeginValue(false);
4861  m_SB.AddNumber(n);
4862 }
4863 
4864 void VmaJsonWriter::WriteBool(bool b)
4865 {
4866  VMA_ASSERT(!m_InsideString);
4867  BeginValue(false);
4868  m_SB.Add(b ? "true" : "false");
4869 }
4870 
4871 void VmaJsonWriter::WriteNull()
4872 {
4873  VMA_ASSERT(!m_InsideString);
4874  BeginValue(false);
4875  m_SB.Add("null");
4876 }
4877 
4878 void VmaJsonWriter::BeginValue(bool isString)
4879 {
4880  if(!m_Stack.empty())
4881  {
4882  StackItem& currItem = m_Stack.back();
4883  if(currItem.type == COLLECTION_TYPE_OBJECT &&
4884  currItem.valueCount % 2 == 0)
4885  {
4886  VMA_ASSERT(isString);
4887  }
4888 
4889  if(currItem.type == COLLECTION_TYPE_OBJECT &&
4890  currItem.valueCount % 2 != 0)
4891  {
4892  m_SB.Add(": ");
4893  }
4894  else if(currItem.valueCount > 0)
4895  {
4896  m_SB.Add(", ");
4897  WriteIndent();
4898  }
4899  else
4900  {
4901  WriteIndent();
4902  }
4903  ++currItem.valueCount;
4904  }
4905 }
4906 
4907 void VmaJsonWriter::WriteIndent(bool oneLess)
4908 {
4909  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
4910  {
4911  m_SB.AddNewLine();
4912 
4913  size_t count = m_Stack.size();
4914  if(count > 0 && oneLess)
4915  {
4916  --count;
4917  }
4918  for(size_t i = 0; i < count; ++i)
4919  {
4920  m_SB.Add(INDENT);
4921  }
4922  }
4923 }
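// Illustrative sketch (not part of the library): driving VmaJsonWriter
// directly. `hAllocator` is a placeholder for a valid VmaAllocator. Produces
// {"Name": "Block", "Size": 1024}; the destructor asserts on unbalanced
// Begin/End calls.
/*
VmaStringBuilder sb(hAllocator);
{
    VmaJsonWriter json(hAllocator->GetAllocationCallbacks(), sb);
    json.BeginObject(true); // single-line object
    json.WriteString("Name");
    json.WriteString("Block");
    json.WriteString("Size");
    json.WriteNumber(1024u);
    json.EndObject();
}
// sb.GetData()/sb.GetLength() now hold the JSON text.
*/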
4924 
4925 #endif // #if VMA_STATS_STRING_ENABLED
4926 
4928 
4929 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
4930 {
4931  if(IsUserDataString())
4932  {
4933  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
4934 
4935  FreeUserDataString(hAllocator);
4936 
4937  if(pUserData != VMA_NULL)
4938  {
4939  const char* const newStrSrc = (char*)pUserData;
4940  const size_t newStrLen = strlen(newStrSrc);
4941  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
4942  memcpy(newStrDst, newStrSrc, newStrLen + 1);
4943  m_pUserData = newStrDst;
4944  }
4945  }
4946  else
4947  {
4948  m_pUserData = pUserData;
4949  }
4950 }
4951 
4952 void VmaAllocation_T::ChangeBlockAllocation(
4953  VmaAllocator hAllocator,
4954  VmaDeviceMemoryBlock* block,
4955  VkDeviceSize offset)
4956 {
4957  VMA_ASSERT(block != VMA_NULL);
4958  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
4959 
4960  // Move mapping reference counter from old block to new block.
4961  if(block != m_BlockAllocation.m_Block)
4962  {
4963  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
4964  if(IsPersistentMap())
4965  ++mapRefCount;
4966  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
4967  block->Map(hAllocator, mapRefCount, VMA_NULL);
4968  }
4969 
4970  m_BlockAllocation.m_Block = block;
4971  m_BlockAllocation.m_Offset = offset;
4972 }
4973 
4974 VkDeviceSize VmaAllocation_T::GetOffset() const
4975 {
4976  switch(m_Type)
4977  {
4978  case ALLOCATION_TYPE_BLOCK:
4979  return m_BlockAllocation.m_Offset;
4980  case ALLOCATION_TYPE_DEDICATED:
4981  return 0;
4982  default:
4983  VMA_ASSERT(0);
4984  return 0;
4985  }
4986 }
4987 
4988 VkDeviceMemory VmaAllocation_T::GetMemory() const
4989 {
4990  switch(m_Type)
4991  {
4992  case ALLOCATION_TYPE_BLOCK:
4993  return m_BlockAllocation.m_Block->GetDeviceMemory();
4994  case ALLOCATION_TYPE_DEDICATED:
4995  return m_DedicatedAllocation.m_hMemory;
4996  default:
4997  VMA_ASSERT(0);
4998  return VK_NULL_HANDLE;
4999  }
5000 }
5001 
5002 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
5003 {
5004  switch(m_Type)
5005  {
5006  case ALLOCATION_TYPE_BLOCK:
5007  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
5008  case ALLOCATION_TYPE_DEDICATED:
5009  return m_DedicatedAllocation.m_MemoryTypeIndex;
5010  default:
5011  VMA_ASSERT(0);
5012  return UINT32_MAX;
5013  }
5014 }
5015 
5016 void* VmaAllocation_T::GetMappedData() const
5017 {
5018  switch(m_Type)
5019  {
5020  case ALLOCATION_TYPE_BLOCK:
5021  if(m_MapCount != 0)
5022  {
5023  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
5024  VMA_ASSERT(pBlockData != VMA_NULL);
5025  return (char*)pBlockData + m_BlockAllocation.m_Offset;
5026  }
5027  else
5028  {
5029  return VMA_NULL;
5030  }
5031  break;
5032  case ALLOCATION_TYPE_DEDICATED:
5033  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
5034  return m_DedicatedAllocation.m_pMappedData;
5035  default:
5036  VMA_ASSERT(0);
5037  return VMA_NULL;
5038  }
5039 }
5040 
5041 bool VmaAllocation_T::CanBecomeLost() const
5042 {
5043  switch(m_Type)
5044  {
5045  case ALLOCATION_TYPE_BLOCK:
5046  return m_BlockAllocation.m_CanBecomeLost;
5047  case ALLOCATION_TYPE_DEDICATED:
5048  return false;
5049  default:
5050  VMA_ASSERT(0);
5051  return false;
5052  }
5053 }
5054 
5055 VmaPool VmaAllocation_T::GetPool() const
5056 {
5057  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
5058  return m_BlockAllocation.m_hPool;
5059 }
5060 
5061 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
5062 {
5063  VMA_ASSERT(CanBecomeLost());
5064 
5065  /*
5066  Warning: This is a carefully designed algorithm.
5067  Do not modify unless you really know what you're doing :)
5068  */
5069  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
5070  for(;;)
5071  {
5072  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
5073  {
5074  VMA_ASSERT(0);
5075  return false;
5076  }
5077  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
5078  {
5079  return false;
5080  }
5081  else // Last use time earlier than current time.
5082  {
5083  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
5084  {
5085  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
5086  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
5087  return true;
5088  }
5089  }
5090  }
5091 }
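// Illustrative sketch (not part of the library): with frameInUseCount == 2 and
// currentFrameIndex == 10, an allocation last used in frame 7 satisfies
// 7 + 2 < 10 and can be made lost, while one last used in frame 8 cannot,
// because frames 8..10 may still be in flight on the GPU.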
5092 
5093 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
5094 {
5095  VMA_ASSERT(IsUserDataString());
5096  if(m_pUserData != VMA_NULL)
5097  {
5098  char* const oldStr = (char*)m_pUserData;
5099  const size_t oldStrLen = strlen(oldStr);
5100  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
5101  m_pUserData = VMA_NULL;
5102  }
5103 }
5104 
5105 void VmaAllocation_T::BlockAllocMap()
5106 {
5107  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
5108 
5109  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
5110  {
5111  ++m_MapCount;
5112  }
5113  else
5114  {
5115  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
5116  }
5117 }
5118 
5119 void VmaAllocation_T::BlockAllocUnmap()
5120 {
5121  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
5122 
5123  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
5124  {
5125  --m_MapCount;
5126  }
5127  else
5128  {
5129  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
5130  }
5131 }
5132 
5133 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
5134 {
5135  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
5136 
5137  if(m_MapCount != 0)
5138  {
5139  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
5140  {
5141  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
5142  *ppData = m_DedicatedAllocation.m_pMappedData;
5143  ++m_MapCount;
5144  return VK_SUCCESS;
5145  }
5146  else
5147  {
5148  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
5149  return VK_ERROR_MEMORY_MAP_FAILED;
5150  }
5151  }
5152  else
5153  {
5154  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
5155  hAllocator->m_hDevice,
5156  m_DedicatedAllocation.m_hMemory,
5157  0, // offset
5158  VK_WHOLE_SIZE,
5159  0, // flags
5160  ppData);
5161  if(result == VK_SUCCESS)
5162  {
5163  m_DedicatedAllocation.m_pMappedData = *ppData;
5164  m_MapCount = 1;
5165  }
5166  return result;
5167  }
5168 }
5169 
5170 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
5171 {
5172  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
5173 
5174  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
5175  {
5176  --m_MapCount;
5177  if(m_MapCount == 0)
5178  {
5179  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
5180  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
5181  hAllocator->m_hDevice,
5182  m_DedicatedAllocation.m_hMemory);
5183  }
5184  }
5185  else
5186  {
5187  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
5188  }
5189 }
5190 
5191 #if VMA_STATS_STRING_ENABLED
5192 
5193 // Names correspond to values of enum VmaSuballocationType.
5194 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
5195  "FREE",
5196  "UNKNOWN",
5197  "BUFFER",
5198  "IMAGE_UNKNOWN",
5199  "IMAGE_LINEAR",
5200  "IMAGE_OPTIMAL",
5201 };
5202 
5203 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
5204 {
5205  json.BeginObject();
5206 
5207  json.WriteString("Blocks");
5208  json.WriteNumber(stat.blockCount);
5209 
5210  json.WriteString("Allocations");
5211  json.WriteNumber(stat.allocationCount);
5212 
5213  json.WriteString("UnusedRanges");
5214  json.WriteNumber(stat.unusedRangeCount);
5215 
5216  json.WriteString("UsedBytes");
5217  json.WriteNumber(stat.usedBytes);
5218 
5219  json.WriteString("UnusedBytes");
5220  json.WriteNumber(stat.unusedBytes);
5221 
5222  if(stat.allocationCount > 1)
5223  {
5224  json.WriteString("AllocationSize");
5225  json.BeginObject(true);
5226  json.WriteString("Min");
5227  json.WriteNumber(stat.allocationSizeMin);
5228  json.WriteString("Avg");
5229  json.WriteNumber(stat.allocationSizeAvg);
5230  json.WriteString("Max");
5231  json.WriteNumber(stat.allocationSizeMax);
5232  json.EndObject();
5233  }
5234 
5235  if(stat.unusedRangeCount > 1)
5236  {
5237  json.WriteString("UnusedRangeSize");
5238  json.BeginObject(true);
5239  json.WriteString("Min");
5240  json.WriteNumber(stat.unusedRangeSizeMin);
5241  json.WriteString("Avg");
5242  json.WriteNumber(stat.unusedRangeSizeAvg);
5243  json.WriteString("Max");
5244  json.WriteNumber(stat.unusedRangeSizeMax);
5245  json.EndObject();
5246  }
5247 
5248  json.EndObject();
5249 }
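// Illustrative sketch (not part of the library): shape of the object emitted
// by VmaPrintStatInfo above (whitespace simplified, values made up):
/*
{
 "Blocks": 2, "Allocations": 10, "UnusedRanges": 3,
 "UsedBytes": 1048576, "UnusedBytes": 65536,
 "AllocationSize": { "Min": 256, "Avg": 104857, "Max": 524288 },
 "UnusedRangeSize": { "Min": 4096, "Avg": 21845, "Max": 32768 }
}
*/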
5250 
5251 #endif // #if VMA_STATS_STRING_ENABLED
5252 
5253 struct VmaSuballocationItemSizeLess
5254 {
5255  bool operator()(
5256  const VmaSuballocationList::iterator lhs,
5257  const VmaSuballocationList::iterator rhs) const
5258  {
5259  return lhs->size < rhs->size;
5260  }
5261  bool operator()(
5262  const VmaSuballocationList::iterator lhs,
5263  VkDeviceSize rhsSize) const
5264  {
5265  return lhs->size < rhsSize;
5266  }
5267 };
5268 
5270 // class VmaBlockMetadata
5271 
5272 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
5273  m_Size(0),
5274  m_FreeCount(0),
5275  m_SumFreeSize(0),
5276  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
5277  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
5278 {
5279 }
5280 
5281 VmaBlockMetadata::~VmaBlockMetadata()
5282 {
5283 }
5284 
5285 void VmaBlockMetadata::Init(VkDeviceSize size)
5286 {
5287  m_Size = size;
5288  m_FreeCount = 1;
5289  m_SumFreeSize = size;
5290 
5291  VmaSuballocation suballoc = {};
5292  suballoc.offset = 0;
5293  suballoc.size = size;
5294  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5295  suballoc.hAllocation = VK_NULL_HANDLE;
5296 
5297  m_Suballocations.push_back(suballoc);
5298  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
5299  --suballocItem;
5300  m_FreeSuballocationsBySize.push_back(suballocItem);
5301 }
5302 
5303 bool VmaBlockMetadata::Validate() const
5304 {
5305  if(m_Suballocations.empty())
5306  {
5307  return false;
5308  }
5309 
5310  // Expected offset of new suballocation as calculated from previous ones.
5311  VkDeviceSize calculatedOffset = 0;
5312  // Expected number of free suballocations as calculated from traversing their list.
5313  uint32_t calculatedFreeCount = 0;
5314  // Expected sum size of free suballocations as calculated from traversing their list.
5315  VkDeviceSize calculatedSumFreeSize = 0;
5316  // Expected number of free suballocations that should be registered in
5317  // m_FreeSuballocationsBySize calculated from traversing their list.
5318  size_t freeSuballocationsToRegister = 0;
5319  // True if previously visited suballocation was free.
5320  bool prevFree = false;
5321 
5322  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
5323  suballocItem != m_Suballocations.cend();
5324  ++suballocItem)
5325  {
5326  const VmaSuballocation& subAlloc = *suballocItem;
5327 
5328  // Actual offset of this suballocation doesn't match expected one.
5329  if(subAlloc.offset != calculatedOffset)
5330  {
5331  return false;
5332  }
5333 
5334  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
5335  // Two adjacent free suballocations are invalid. They should be merged.
5336  if(prevFree && currFree)
5337  {
5338  return false;
5339  }
5340 
5341  if(currFree != (subAlloc.hAllocation == VK_NULL_HANDLE))
5342  {
5343  return false;
5344  }
5345 
5346  if(currFree)
5347  {
5348  calculatedSumFreeSize += subAlloc.size;
5349  ++calculatedFreeCount;
5350  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5351  {
5352  ++freeSuballocationsToRegister;
5353  }
5354  }
5355  else
5356  {
5357  if(subAlloc.hAllocation->GetOffset() != subAlloc.offset)
5358  {
5359  return false;
5360  }
5361  if(subAlloc.hAllocation->GetSize() != subAlloc.size)
5362  {
5363  return false;
5364  }
5365  }
5366 
5367  calculatedOffset += subAlloc.size;
5368  prevFree = currFree;
5369  }
5370 
5371  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
5372  // match expected one.
5373  if(m_FreeSuballocationsBySize.size() != freeSuballocationsToRegister)
5374  {
5375  return false;
5376  }
5377 
5378  VkDeviceSize lastSize = 0;
5379  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
5380  {
5381  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
5382 
5383  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
5384  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
5385  {
5386  return false;
5387  }
5388  // They must be sorted by size ascending.
5389  if(suballocItem->size < lastSize)
5390  {
5391  return false;
5392  }
5393 
5394  lastSize = suballocItem->size;
5395  }
5396 
5397  // Check if totals match calculated values.
5398  if(!ValidateFreeSuballocationList() ||
5399  (calculatedOffset != m_Size) ||
5400  (calculatedSumFreeSize != m_SumFreeSize) ||
5401  (calculatedFreeCount != m_FreeCount))
5402  {
5403  return false;
5404  }
5405 
5406  return true;
5407 }
5408 
5409 VkDeviceSize VmaBlockMetadata::GetUnusedRangeSizeMax() const
5410 {
5411  if(!m_FreeSuballocationsBySize.empty())
5412  {
5413  return m_FreeSuballocationsBySize.back()->size;
5414  }
5415  else
5416  {
5417  return 0;
5418  }
5419 }
5420 
5421 bool VmaBlockMetadata::IsEmpty() const
5422 {
5423  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
5424 }
5425 
5426 void VmaBlockMetadata::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
5427 {
5428  outInfo.blockCount = 1;
5429 
5430  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
5431  outInfo.allocationCount = rangeCount - m_FreeCount;
5432  outInfo.unusedRangeCount = m_FreeCount;
5433 
5434  outInfo.unusedBytes = m_SumFreeSize;
5435  outInfo.usedBytes = m_Size - outInfo.unusedBytes;
5436 
5437  outInfo.allocationSizeMin = UINT64_MAX;
5438  outInfo.allocationSizeMax = 0;
5439  outInfo.unusedRangeSizeMin = UINT64_MAX;
5440  outInfo.unusedRangeSizeMax = 0;
5441 
5442  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
5443  suballocItem != m_Suballocations.cend();
5444  ++suballocItem)
5445  {
5446  const VmaSuballocation& suballoc = *suballocItem;
5447  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
5448  {
5449  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
5450  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
5451  }
5452  else
5453  {
5454  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
5455  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
5456  }
5457  }
5458 }
5459 
5460 void VmaBlockMetadata::AddPoolStats(VmaPoolStats& inoutStats) const
5461 {
5462  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
5463 
5464  inoutStats.size += m_Size;
5465  inoutStats.unusedSize += m_SumFreeSize;
5466  inoutStats.allocationCount += rangeCount - m_FreeCount;
5467  inoutStats.unusedRangeCount += m_FreeCount;
5468  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
5469 }
5470 
5471 #if VMA_STATS_STRING_ENABLED
5472 
5473 void VmaBlockMetadata::PrintDetailedMap(class VmaJsonWriter& json) const
5474 {
5475  json.BeginObject();
5476 
5477  json.WriteString("TotalBytes");
5478  json.WriteNumber(m_Size);
5479 
5480  json.WriteString("UnusedBytes");
5481  json.WriteNumber(m_SumFreeSize);
5482 
5483  json.WriteString("Allocations");
5484  json.WriteNumber((uint64_t)m_Suballocations.size() - m_FreeCount);
5485 
5486  json.WriteString("UnusedRanges");
5487  json.WriteNumber(m_FreeCount);
5488 
5489  json.WriteString("Suballocations");
5490  json.BeginArray();
5491  size_t i = 0;
5492  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
5493  suballocItem != m_Suballocations.cend();
5494  ++suballocItem, ++i)
5495  {
5496  json.BeginObject(true);
5497 
5498  json.WriteString("Type");
5499  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[suballocItem->type]);
5500 
5501  json.WriteString("Size");
5502  json.WriteNumber(suballocItem->size);
5503 
5504  json.WriteString("Offset");
5505  json.WriteNumber(suballocItem->offset);
5506 
5507  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
5508  {
5509  const void* pUserData = suballocItem->hAllocation->GetUserData();
5510  if(pUserData != VMA_NULL)
5511  {
5512  json.WriteString("UserData");
5513  if(suballocItem->hAllocation->IsUserDataString())
5514  {
5515  json.WriteString((const char*)pUserData);
5516  }
5517  else
5518  {
5519  json.BeginString();
5520  json.ContinueString_Pointer(pUserData);
5521  json.EndString();
5522  }
5523  }
5524  }
5525 
5526  json.EndObject();
5527  }
5528  json.EndArray();
5529 
5530  json.EndObject();
5531 }
5532 
5533 #endif // #if VMA_STATS_STRING_ENABLED
5534 
5535 /*
5536 How many suitable free suballocations to analyze before choosing the best one.
5537 - Set to 1 to use First-Fit algorithm - the first suitable free suballocation
5538  will be chosen.
5539 - Set to UINT32_MAX to use Best-Fit/Worst-Fit algorithm - all suitable free
5540  suballocations will be analyzed and the best one will be chosen.
5541 - Any other value is also acceptable.
5542 */
5543 //static const uint32_t MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK = 8;
5544 
5545 void VmaBlockMetadata::CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest)
5546 {
5547  VMA_ASSERT(IsEmpty());
5548  pAllocationRequest->offset = 0;
5549  pAllocationRequest->sumFreeSize = m_SumFreeSize;
5550  pAllocationRequest->sumItemSize = 0;
5551  pAllocationRequest->item = m_Suballocations.begin();
5552  pAllocationRequest->itemsToMakeLostCount = 0;
5553 }
5554 
5555 bool VmaBlockMetadata::CreateAllocationRequest(
5556  uint32_t currentFrameIndex,
5557  uint32_t frameInUseCount,
5558  VkDeviceSize bufferImageGranularity,
5559  VkDeviceSize allocSize,
5560  VkDeviceSize allocAlignment,
5561  VmaSuballocationType allocType,
5562  bool canMakeOtherLost,
5563  VmaAllocationRequest* pAllocationRequest)
5564 {
5565  VMA_ASSERT(allocSize > 0);
5566  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
5567  VMA_ASSERT(pAllocationRequest != VMA_NULL);
5568  VMA_HEAVY_ASSERT(Validate());
5569 
5570  // There is not enough total free space in this block to fulfill the request: early return.
5571  if(canMakeOtherLost == false && m_SumFreeSize < allocSize)
5572  {
5573  return false;
5574  }
5575 
5576  // New algorithm, efficiently searching m_FreeSuballocationsBySize.
5577  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
5578  if(freeSuballocCount > 0)
5579  {
5580  if(VMA_BEST_FIT)
5581  {
5582  // Find first free suballocation with size not less than allocSize.
5583  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
5584  m_FreeSuballocationsBySize.data(),
5585  m_FreeSuballocationsBySize.data() + freeSuballocCount,
5586  allocSize,
5587  VmaSuballocationItemSizeLess());
5588  size_t index = it - m_FreeSuballocationsBySize.data();
5589  for(; index < freeSuballocCount; ++index)
5590  {
5591  if(CheckAllocation(
5592  currentFrameIndex,
5593  frameInUseCount,
5594  bufferImageGranularity,
5595  allocSize,
5596  allocAlignment,
5597  allocType,
5598  m_FreeSuballocationsBySize[index],
5599  false, // canMakeOtherLost
5600  &pAllocationRequest->offset,
5601  &pAllocationRequest->itemsToMakeLostCount,
5602  &pAllocationRequest->sumFreeSize,
5603  &pAllocationRequest->sumItemSize))
5604  {
5605  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
5606  return true;
5607  }
5608  }
5609  }
5610  else
5611  {
5612  // Search starting from the biggest suballocations.
5613  for(size_t index = freeSuballocCount; index--; )
5614  {
5615  if(CheckAllocation(
5616  currentFrameIndex,
5617  frameInUseCount,
5618  bufferImageGranularity,
5619  allocSize,
5620  allocAlignment,
5621  allocType,
5622  m_FreeSuballocationsBySize[index],
5623  false, // canMakeOtherLost
5624  &pAllocationRequest->offset,
5625  &pAllocationRequest->itemsToMakeLostCount,
5626  &pAllocationRequest->sumFreeSize,
5627  &pAllocationRequest->sumItemSize))
5628  {
5629  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
5630  return true;
5631  }
5632  }
5633  }
5634  }
5635 
5636  if(canMakeOtherLost)
5637  {
5638  // Brute-force algorithm. TODO: Come up with something better.
5639 
5640  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
5641  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
5642 
5643  VmaAllocationRequest tmpAllocRequest = {};
5644  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
5645  suballocIt != m_Suballocations.end();
5646  ++suballocIt)
5647  {
5648  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
5649  suballocIt->hAllocation->CanBecomeLost())
5650  {
5651  if(CheckAllocation(
5652  currentFrameIndex,
5653  frameInUseCount,
5654  bufferImageGranularity,
5655  allocSize,
5656  allocAlignment,
5657  allocType,
5658  suballocIt,
5659  canMakeOtherLost,
5660  &tmpAllocRequest.offset,
5661  &tmpAllocRequest.itemsToMakeLostCount,
5662  &tmpAllocRequest.sumFreeSize,
5663  &tmpAllocRequest.sumItemSize))
5664  {
5665  tmpAllocRequest.item = suballocIt;
5666 
5667  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
5668  {
5669  *pAllocationRequest = tmpAllocRequest;
5670  }
5671  }
5672  }
5673  }
5674 
5675  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
5676  {
5677  return true;
5678  }
5679  }
5680 
5681  return false;
5682 }
5683 
5684 bool VmaBlockMetadata::MakeRequestedAllocationsLost(
5685  uint32_t currentFrameIndex,
5686  uint32_t frameInUseCount,
5687  VmaAllocationRequest* pAllocationRequest)
5688 {
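 // Walks forward from the item chosen by CreateAllocationRequest, making each
 // allocation on the way lost and merging the freed ranges, until the promised
 // number of items has been sacrificed. Fails if an allocation can no longer be
 // made lost, e.g. because another thread touched it in the meantime.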
5689  while(pAllocationRequest->itemsToMakeLostCount > 0)
5690  {
5691  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
5692  {
5693  ++pAllocationRequest->item;
5694  }
5695  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
5696  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
5697  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
5698  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
5699  {
5700  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
5701  --pAllocationRequest->itemsToMakeLostCount;
5702  }
5703  else
5704  {
5705  return false;
5706  }
5707  }
5708 
5709  VMA_HEAVY_ASSERT(Validate());
5710  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
5711  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
5712 
5713  return true;
5714 }
5715 
5716 uint32_t VmaBlockMetadata::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
5717 {
5718  uint32_t lostAllocationCount = 0;
5719  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
5720  it != m_Suballocations.end();
5721  ++it)
5722  {
5723  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
5724  it->hAllocation->CanBecomeLost() &&
5725  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
5726  {
5727  it = FreeSuballocation(it);
5728  ++lostAllocationCount;
5729  }
5730  }
5731  return lostAllocationCount;
5732 }
5733 
5734 void VmaBlockMetadata::Alloc(
5735  const VmaAllocationRequest& request,
5736  VmaSuballocationType type,
5737  VkDeviceSize allocSize,
5738  VmaAllocation hAllocation)
5739 {
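 // Carves the allocation out of the chosen free suballocation: the free range is
 // split into [padding before][allocation][padding after], and each non-empty
 // padding piece is re-inserted into the list as its own free suballocation.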
5740  VMA_ASSERT(request.item != m_Suballocations.end());
5741  VmaSuballocation& suballoc = *request.item;
5742  // Given suballocation is a free block.
5743  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
5744  // Given offset is inside this suballocation.
5745  VMA_ASSERT(request.offset >= suballoc.offset);
5746  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
5747  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
5748  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
5749 
5750  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
5751  // it to become used.
5752  UnregisterFreeSuballocation(request.item);
5753 
5754  suballoc.offset = request.offset;
5755  suballoc.size = allocSize;
5756  suballoc.type = type;
5757  suballoc.hAllocation = hAllocation;
5758 
5759  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
5760  if(paddingEnd)
5761  {
5762  VmaSuballocation paddingSuballoc = {};
5763  paddingSuballoc.offset = request.offset + allocSize;
5764  paddingSuballoc.size = paddingEnd;
5765  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5766  VmaSuballocationList::iterator next = request.item;
5767  ++next;
5768  const VmaSuballocationList::iterator paddingEndItem =
5769  m_Suballocations.insert(next, paddingSuballoc);
5770  RegisterFreeSuballocation(paddingEndItem);
5771  }
5772 
5773  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
5774  if(paddingBegin)
5775  {
5776  VmaSuballocation paddingSuballoc = {};
5777  paddingSuballoc.offset = request.offset - paddingBegin;
5778  paddingSuballoc.size = paddingBegin;
5779  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5780  const VmaSuballocationList::iterator paddingBeginItem =
5781  m_Suballocations.insert(request.item, paddingSuballoc);
5782  RegisterFreeSuballocation(paddingBeginItem);
5783  }
5784 
5785  // Update totals.
5786  --m_FreeCount;
5787  if(paddingBegin > 0)
5788  {
5789  ++m_FreeCount;
5790  }
5791  if(paddingEnd > 0)
5792  {
5793  ++m_FreeCount;
5794  }
5795  m_SumFreeSize -= allocSize;
5796 }
5797 
5798 void VmaBlockMetadata::Free(const VmaAllocation allocation)
5799 {
5800  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
5801  suballocItem != m_Suballocations.end();
5802  ++suballocItem)
5803  {
5804  VmaSuballocation& suballoc = *suballocItem;
5805  if(suballoc.hAllocation == allocation)
5806  {
5807  FreeSuballocation(suballocItem);
5808  VMA_HEAVY_ASSERT(Validate());
5809  return;
5810  }
5811  }
5812  VMA_ASSERT(0 && "Not found!");
5813 }
5814 
5815 void VmaBlockMetadata::FreeAtOffset(VkDeviceSize offset)
5816 {
5817  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
5818  suballocItem != m_Suballocations.end();
5819  ++suballocItem)
5820  {
5821  VmaSuballocation& suballoc = *suballocItem;
5822  if(suballoc.offset == offset)
5823  {
5824  FreeSuballocation(suballocItem);
5825  return;
5826  }
5827  }
5828  VMA_ASSERT(0 && "Not found!");
5829 }
5830 
5831 bool VmaBlockMetadata::ValidateFreeSuballocationList() const
5832 {
5833  VkDeviceSize lastSize = 0;
5834  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
5835  {
5836  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
5837 
5838  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
5839  {
5840  VMA_ASSERT(0);
5841  return false;
5842  }
5843  if(it->size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5844  {
5845  VMA_ASSERT(0);
5846  return false;
5847  }
5848  if(it->size < lastSize)
5849  {
5850  VMA_ASSERT(0);
5851  return false;
5852  }
5853 
5854  lastSize = it->size;
5855  }
5856  return true;
5857 }
5858 
5859 bool VmaBlockMetadata::CheckAllocation(
5860  uint32_t currentFrameIndex,
5861  uint32_t frameInUseCount,
5862  VkDeviceSize bufferImageGranularity,
5863  VkDeviceSize allocSize,
5864  VkDeviceSize allocAlignment,
5865  VmaSuballocationType allocType,
5866  VmaSuballocationList::const_iterator suballocItem,
5867  bool canMakeOtherLost,
5868  VkDeviceSize* pOffset,
5869  size_t* itemsToMakeLostCount,
5870  VkDeviceSize* pSumFreeSize,
5871  VkDeviceSize* pSumItemSize) const
5872 {
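 // Computes the final offset for a prospective allocation starting at suballocItem,
 // honoring VMA_DEBUG_MARGIN, the requested alignment, and bufferImageGranularity.
 // Returns false if the allocation cannot be placed there. With canMakeOtherLost,
 // the out-parameters also report how many allocations would have to be made lost
 // and the total sizes of free and to-be-lost ranges that would be consumed.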
5873  VMA_ASSERT(allocSize > 0);
5874  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
5875  VMA_ASSERT(suballocItem != m_Suballocations.cend());
5876  VMA_ASSERT(pOffset != VMA_NULL);
5877 
5878  *itemsToMakeLostCount = 0;
5879  *pSumFreeSize = 0;
5880  *pSumItemSize = 0;
5881 
5882  if(canMakeOtherLost)
5883  {
5884  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
5885  {
5886  *pSumFreeSize = suballocItem->size;
5887  }
5888  else
5889  {
5890  if(suballocItem->hAllocation->CanBecomeLost() &&
5891  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
5892  {
5893  ++*itemsToMakeLostCount;
5894  *pSumItemSize = suballocItem->size;
5895  }
5896  else
5897  {
5898  return false;
5899  }
5900  }
5901 
5902  // Remaining size is too small for this request: Early return.
5903  if(m_Size - suballocItem->offset < allocSize)
5904  {
5905  return false;
5906  }
5907 
5908  // Start from offset equal to beginning of this suballocation.
5909  *pOffset = suballocItem->offset;
5910 
5911  // Apply VMA_DEBUG_MARGIN at the beginning.
5912  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
5913  {
5914  *pOffset += VMA_DEBUG_MARGIN;
5915  }
5916 
5917  // Apply alignment.
5918  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
5919  *pOffset = VmaAlignUp(*pOffset, alignment);
5920 
5921  // Check previous suballocations for BufferImageGranularity conflicts.
5922  // Increase the alignment if necessary.
5923  if(bufferImageGranularity > 1)
5924  {
5925  bool bufferImageGranularityConflict = false;
5926  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
5927  while(prevSuballocItem != m_Suballocations.cbegin())
5928  {
5929  --prevSuballocItem;
5930  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
5931  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
5932  {
5933  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
5934  {
5935  bufferImageGranularityConflict = true;
5936  break;
5937  }
5938  }
5939  else
5940  // Already on previous page.
5941  break;
5942  }
5943  if(bufferImageGranularityConflict)
5944  {
5945  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
5946  }
5947  }
5948 
5949  // Now that we have final *pOffset, check if we are past suballocItem.
5950  // If yes, return false - this function should be called for another suballocItem as the starting point.
5951  if(*pOffset >= suballocItem->offset + suballocItem->size)
5952  {
5953  return false;
5954  }
5955 
5956  // Calculate padding at the beginning based on current offset.
5957  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
5958 
5959  // Calculate required margin at the end if this is not last suballocation.
5960  VmaSuballocationList::const_iterator next = suballocItem;
5961  ++next;
5962  const VkDeviceSize requiredEndMargin =
5963  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
5964 
5965  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
5966  // Another early return check.
5967  if(suballocItem->offset + totalSize > m_Size)
5968  {
5969  return false;
5970  }
5971 
5972  // Advance lastSuballocItem until desired size is reached.
5973  // Update itemsToMakeLostCount.
5974  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
5975  if(totalSize > suballocItem->size)
5976  {
5977  VkDeviceSize remainingSize = totalSize - suballocItem->size;
5978  while(remainingSize > 0)
5979  {
5980  ++lastSuballocItem;
5981  if(lastSuballocItem == m_Suballocations.cend())
5982  {
5983  return false;
5984  }
5985  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
5986  {
5987  *pSumFreeSize += lastSuballocItem->size;
5988  }
5989  else
5990  {
5991  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
5992  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
5993  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
5994  {
5995  ++*itemsToMakeLostCount;
5996  *pSumItemSize += lastSuballocItem->size;
5997  }
5998  else
5999  {
6000  return false;
6001  }
6002  }
6003  remainingSize = (lastSuballocItem->size < remainingSize) ?
6004  remainingSize - lastSuballocItem->size : 0;
6005  }
6006  }
6007 
6008  // Check next suballocations for BufferImageGranularity conflicts.
6009  // If conflict exists, we must mark more allocations lost or fail.
6010  if(bufferImageGranularity > 1)
6011  {
6012  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
6013  ++nextSuballocItem;
6014  while(nextSuballocItem != m_Suballocations.cend())
6015  {
6016  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
6017  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
6018  {
6019  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
6020  {
6021  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
6022  if(nextSuballoc.hAllocation->CanBecomeLost() &&
6023  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
6024  {
6025  ++*itemsToMakeLostCount;
6026  }
6027  else
6028  {
6029  return false;
6030  }
6031  }
6032  }
6033  else
6034  {
6035  // Already on next page.
6036  break;
6037  }
6038  ++nextSuballocItem;
6039  }
6040  }
6041  }
6042  else
6043  {
6044  const VmaSuballocation& suballoc = *suballocItem;
6045  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
6046 
6047  *pSumFreeSize = suballoc.size;
6048 
6049  // Size of this suballocation is too small for this request: Early return.
6050  if(suballoc.size < allocSize)
6051  {
6052  return false;
6053  }
6054 
6055  // Start from offset equal to beginning of this suballocation.
6056  *pOffset = suballoc.offset;
6057 
6058  // Apply VMA_DEBUG_MARGIN at the beginning.
6059  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
6060  {
6061  *pOffset += VMA_DEBUG_MARGIN;
6062  }
6063 
6064  // Apply alignment.
6065  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
6066  *pOffset = VmaAlignUp(*pOffset, alignment);
6067 
6068  // Check previous suballocations for BufferImageGranularity conflicts.
6069  // Increase the alignment if necessary.
6070  if(bufferImageGranularity > 1)
6071  {
6072  bool bufferImageGranularityConflict = false;
6073  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
6074  while(prevSuballocItem != m_Suballocations.cbegin())
6075  {
6076  --prevSuballocItem;
6077  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
6078  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
6079  {
6080  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
6081  {
6082  bufferImageGranularityConflict = true;
6083  break;
6084  }
6085  }
6086  else
6087  // Already on previous page.
6088  break;
6089  }
6090  if(bufferImageGranularityConflict)
6091  {
6092  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
6093  }
6094  }
6095 
6096  // Calculate padding at the beginning based on current offset.
6097  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
6098 
6099  // Calculate required margin at the end if this is not last suballocation.
6100  VmaSuballocationList::const_iterator next = suballocItem;
6101  ++next;
6102  const VkDeviceSize requiredEndMargin =
6103  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
6104 
6105  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
6106  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
6107  {
6108  return false;
6109  }
6110 
6111  // Check next suballocations for BufferImageGranularity conflicts.
6112  // If conflict exists, allocation cannot be made here.
6113  if(bufferImageGranularity > 1)
6114  {
6115  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
6116  ++nextSuballocItem;
6117  while(nextSuballocItem != m_Suballocations.cend())
6118  {
6119  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
6120  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
6121  {
6122  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
6123  {
6124  return false;
6125  }
6126  }
6127  else
6128  {
6129  // Already on next page.
6130  break;
6131  }
6132  ++nextSuballocItem;
6133  }
6134  }
6135  }
6136 
6137  // All tests passed: Success. pOffset is already filled.
6138  return true;
6139 }
6140 
6141 void VmaBlockMetadata::MergeFreeWithNext(VmaSuballocationList::iterator item)
6142 {
6143  VMA_ASSERT(item != m_Suballocations.end());
6144  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
6145 
6146  VmaSuballocationList::iterator nextItem = item;
6147  ++nextItem;
6148  VMA_ASSERT(nextItem != m_Suballocations.end());
6149  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
6150 
6151  item->size += nextItem->size;
6152  --m_FreeCount;
6153  m_Suballocations.erase(nextItem);
6154 }
6155 
6156 VmaSuballocationList::iterator VmaBlockMetadata::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
6157 {
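 // Marks the suballocation as free and coalesces it with free neighbors, so the
 // list never contains two adjacent free suballocations. Returns an iterator to
 // the resulting (possibly merged) free suballocation.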
6158  // Change this suballocation to be marked as free.
6159  VmaSuballocation& suballoc = *suballocItem;
6160  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
6161  suballoc.hAllocation = VK_NULL_HANDLE;
6162 
6163  // Update totals.
6164  ++m_FreeCount;
6165  m_SumFreeSize += suballoc.size;
6166 
6167  // Merge with previous and/or next suballocation if it's also free.
6168  bool mergeWithNext = false;
6169  bool mergeWithPrev = false;
6170 
6171  VmaSuballocationList::iterator nextItem = suballocItem;
6172  ++nextItem;
6173  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
6174  {
6175  mergeWithNext = true;
6176  }
6177 
6178  VmaSuballocationList::iterator prevItem = suballocItem;
6179  if(suballocItem != m_Suballocations.begin())
6180  {
6181  --prevItem;
6182  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
6183  {
6184  mergeWithPrev = true;
6185  }
6186  }
6187 
6188  if(mergeWithNext)
6189  {
6190  UnregisterFreeSuballocation(nextItem);
6191  MergeFreeWithNext(suballocItem);
6192  }
6193 
6194  if(mergeWithPrev)
6195  {
6196  UnregisterFreeSuballocation(prevItem);
6197  MergeFreeWithNext(prevItem);
6198  RegisterFreeSuballocation(prevItem);
6199  return prevItem;
6200  }
6201  else
6202  {
6203  RegisterFreeSuballocation(suballocItem);
6204  return suballocItem;
6205  }
6206 }
6207 
6208 void VmaBlockMetadata::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
6209 {
6210  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
6211  VMA_ASSERT(item->size > 0);
6212 
6213  // You may want to enable this validation at the beginning or at the end of
6214  // this function, depending on what you want to check.
6215  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
6216 
6217  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6218  {
6219  if(m_FreeSuballocationsBySize.empty())
6220  {
6221  m_FreeSuballocationsBySize.push_back(item);
6222  }
6223  else
6224  {
6225  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
6226  }
6227  }
6228 
6229  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
6230 }
6231 
6232 
6233 void VmaBlockMetadata::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
6234 {
6235  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
6236  VMA_ASSERT(item->size > 0);
6237 
6238  // You may want to enable this validation at the beginning or at the end of
6239  // this function, depending on what you want to check.
6240  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
6241 
6242  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6243  {
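 // m_FreeSuballocationsBySize is sorted by size only, so the binary search lands
 // on the first entry of equal size; the linear scan below then walks the run of
 // equal-size entries to find the exact iterator to remove.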
6244  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
6245  m_FreeSuballocationsBySize.data(),
6246  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
6247  item,
6248  VmaSuballocationItemSizeLess());
6249  for(size_t index = it - m_FreeSuballocationsBySize.data();
6250  index < m_FreeSuballocationsBySize.size();
6251  ++index)
6252  {
6253  if(m_FreeSuballocationsBySize[index] == item)
6254  {
6255  VmaVectorRemove(m_FreeSuballocationsBySize, index);
6256  return;
6257  }
6258  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
6259  }
6260  VMA_ASSERT(0 && "Not found.");
6261  }
6262 
6263  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
6264 }
6265 
6266 ////////////////////////////////////////////////////////////////////////////////
6267 // class VmaDeviceMemoryBlock
6268 
6269 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
6270  m_Metadata(hAllocator),
6271  m_MemoryTypeIndex(UINT32_MAX),
6272  m_hMemory(VK_NULL_HANDLE),
6273  m_MapCount(0),
6274  m_pMappedData(VMA_NULL)
6275 {
6276 }
6277 
6278 void VmaDeviceMemoryBlock::Init(
6279  uint32_t newMemoryTypeIndex,
6280  VkDeviceMemory newMemory,
6281  VkDeviceSize newSize)
6282 {
6283  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
6284 
6285  m_MemoryTypeIndex = newMemoryTypeIndex;
6286  m_hMemory = newMemory;
6287 
6288  m_Metadata.Init(newSize);
6289 }
6290 
6291 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
6292 {
6293  // This is the most important assert in the entire library.
6294  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
6295  VMA_ASSERT(m_Metadata.IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
6296 
6297  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
6298  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_Metadata.GetSize(), m_hMemory);
6299  m_hMemory = VK_NULL_HANDLE;
6300 }
6301 
6302 bool VmaDeviceMemoryBlock::Validate() const
6303 {
6304  if((m_hMemory == VK_NULL_HANDLE) ||
6305  (m_Metadata.GetSize() == 0))
6306  {
6307  return false;
6308  }
6309 
6310  return m_Metadata.Validate();
6311 }
6312 
6313 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
6314 {
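 // Mapping is reference-counted per block: only the first Map() calls vkMapMemory
 // and only the matching last Unmap() calls vkUnmapMemory, so multiple allocations
 // from the same block can be persistently mapped at the same time.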
6315  if(count == 0)
6316  {
6317  return VK_SUCCESS;
6318  }
6319 
6320  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
6321  if(m_MapCount != 0)
6322  {
6323  m_MapCount += count;
6324  VMA_ASSERT(m_pMappedData != VMA_NULL);
6325  if(ppData != VMA_NULL)
6326  {
6327  *ppData = m_pMappedData;
6328  }
6329  return VK_SUCCESS;
6330  }
6331  else
6332  {
6333  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
6334  hAllocator->m_hDevice,
6335  m_hMemory,
6336  0, // offset
6337  VK_WHOLE_SIZE,
6338  0, // flags
6339  &m_pMappedData);
6340  if(result == VK_SUCCESS)
6341  {
6342  if(ppData != VMA_NULL)
6343  {
6344  *ppData = m_pMappedData;
6345  }
6346  m_MapCount = count;
6347  }
6348  return result;
6349  }
6350 }
6351 
6352 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
6353 {
6354  if(count == 0)
6355  {
6356  return;
6357  }
6358 
6359  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
6360  if(m_MapCount >= count)
6361  {
6362  m_MapCount -= count;
6363  if(m_MapCount == 0)
6364  {
6365  m_pMappedData = VMA_NULL;
6366  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
6367  }
6368  }
6369  else
6370  {
6371  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
6372  }
6373 }
6374 
6375 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
6376  const VmaAllocator hAllocator,
6377  const VmaAllocation hAllocation,
6378  VkBuffer hBuffer)
6379 {
6380  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
6381  hAllocation->GetBlock() == this);
6382  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
6383  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
6384  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
6385  hAllocator->m_hDevice,
6386  hBuffer,
6387  m_hMemory,
6388  hAllocation->GetOffset());
6389 }
6390 
6391 VkResult VmaDeviceMemoryBlock::BindImageMemory(
6392  const VmaAllocator hAllocator,
6393  const VmaAllocation hAllocation,
6394  VkImage hImage)
6395 {
6396  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
6397  hAllocation->GetBlock() == this);
6398  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
6399  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
6400  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
6401  hAllocator->m_hDevice,
6402  hImage,
6403  m_hMemory,
6404  hAllocation->GetOffset());
6405 }
6406 
6407 static void InitStatInfo(VmaStatInfo& outInfo)
6408 {
6409  memset(&outInfo, 0, sizeof(outInfo));
6410  outInfo.allocationSizeMin = UINT64_MAX;
6411  outInfo.unusedRangeSizeMin = UINT64_MAX;
6412 }
6413 
6414 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
6415 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
6416 {
6417  inoutInfo.blockCount += srcInfo.blockCount;
6418  inoutInfo.allocationCount += srcInfo.allocationCount;
6419  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
6420  inoutInfo.usedBytes += srcInfo.usedBytes;
6421  inoutInfo.unusedBytes += srcInfo.unusedBytes;
6422  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
6423  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
6424  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
6425  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
6426 }
6427 
6428 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
6429 {
6430  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
6431  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
6432  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
6433  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
6434 }
6435 
6436 VmaPool_T::VmaPool_T(
6437  VmaAllocator hAllocator,
6438  const VmaPoolCreateInfo& createInfo) :
6439  m_BlockVector(
6440  hAllocator,
6441  createInfo.memoryTypeIndex,
6442  createInfo.blockSize,
6443  createInfo.minBlockCount,
6444  createInfo.maxBlockCount,
6445  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
6446  createInfo.frameInUseCount,
6447  true) // isCustomPool
6448 {
6449 }
6450 
6451 VmaPool_T::~VmaPool_T()
6452 {
6453 }
6454 
6455 #if VMA_STATS_STRING_ENABLED
6456 
6457 #endif // #if VMA_STATS_STRING_ENABLED
6458 
6459 VmaBlockVector::VmaBlockVector(
6460  VmaAllocator hAllocator,
6461  uint32_t memoryTypeIndex,
6462  VkDeviceSize preferredBlockSize,
6463  size_t minBlockCount,
6464  size_t maxBlockCount,
6465  VkDeviceSize bufferImageGranularity,
6466  uint32_t frameInUseCount,
6467  bool isCustomPool) :
6468  m_hAllocator(hAllocator),
6469  m_MemoryTypeIndex(memoryTypeIndex),
6470  m_PreferredBlockSize(preferredBlockSize),
6471  m_MinBlockCount(minBlockCount),
6472  m_MaxBlockCount(maxBlockCount),
6473  m_BufferImageGranularity(bufferImageGranularity),
6474  m_FrameInUseCount(frameInUseCount),
6475  m_IsCustomPool(isCustomPool),
6476  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
6477  m_HasEmptyBlock(false),
6478  m_pDefragmentator(VMA_NULL)
6479 {
6480 }
6481 
6482 VmaBlockVector::~VmaBlockVector()
6483 {
6484  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
6485 
6486  for(size_t i = m_Blocks.size(); i--; )
6487  {
6488  m_Blocks[i]->Destroy(m_hAllocator);
6489  vma_delete(m_hAllocator, m_Blocks[i]);
6490  }
6491 }
6492 
6493 VkResult VmaBlockVector::CreateMinBlocks()
6494 {
6495  for(size_t i = 0; i < m_MinBlockCount; ++i)
6496  {
6497  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
6498  if(res != VK_SUCCESS)
6499  {
6500  return res;
6501  }
6502  }
6503  return VK_SUCCESS;
6504 }
6505 
6506 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
6507 {
6508  pStats->size = 0;
6509  pStats->unusedSize = 0;
6510  pStats->allocationCount = 0;
6511  pStats->unusedRangeCount = 0;
6512  pStats->unusedRangeSizeMax = 0;
6513 
6514  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6515 
6516  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6517  {
6518  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
6519  VMA_ASSERT(pBlock);
6520  VMA_HEAVY_ASSERT(pBlock->Validate());
6521  pBlock->m_Metadata.AddPoolStats(*pStats);
6522  }
6523 }
6524 
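// Upper bound on retries in step 3 of Allocate() below: between finding the best
// request and actually making its allocations lost, other threads may touch those
// allocations, which forces another search.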
6525 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
6526 
6527 VkResult VmaBlockVector::Allocate(
6528  VmaPool hCurrentPool,
6529  uint32_t currentFrameIndex,
6530  const VkMemoryRequirements& vkMemReq,
6531  const VmaAllocationCreateInfo& createInfo,
6532  VmaSuballocationType suballocType,
6533  VmaAllocation* pAllocation)
6534 {
6535  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
6536  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
6537 
6538  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6539 
6540  // 1. Search existing allocations. Try to allocate without making other allocations lost.
6541  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
6542  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
6543  {
6544  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
6545  VMA_ASSERT(pCurrBlock);
6546  VmaAllocationRequest currRequest = {};
6547  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
6548  currentFrameIndex,
6549  m_FrameInUseCount,
6550  m_BufferImageGranularity,
6551  vkMemReq.size,
6552  vkMemReq.alignment,
6553  suballocType,
6554  false, // canMakeOtherLost
6555  &currRequest))
6556  {
6557  // Allocate from pCurrBlock.
6558  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
6559 
6560  if(mapped)
6561  {
6562  VkResult res = pCurrBlock->Map(m_hAllocator, 1, VMA_NULL);
6563  if(res != VK_SUCCESS)
6564  {
6565  return res;
6566  }
6567  }
6568 
6569  // This block is about to be used, so if it was the empty one, the vector no longer has an empty block.
6570  if(pCurrBlock->m_Metadata.IsEmpty())
6571  {
6572  m_HasEmptyBlock = false;
6573  }
6574 
6575  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
6576  pCurrBlock->m_Metadata.Alloc(currRequest, suballocType, vkMemReq.size, *pAllocation);
6577  (*pAllocation)->InitBlockAllocation(
6578  hCurrentPool,
6579  pCurrBlock,
6580  currRequest.offset,
6581  vkMemReq.alignment,
6582  vkMemReq.size,
6583  suballocType,
6584  mapped,
6585  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
6586  VMA_HEAVY_ASSERT(pCurrBlock->Validate());
6587  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
6588  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
6589  return VK_SUCCESS;
6590  }
6591  }
6592 
6593  const bool canCreateNewBlock =
6594  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
6595  (m_Blocks.size() < m_MaxBlockCount);
6596 
6597  // 2. Try to create new block.
6598  if(canCreateNewBlock)
6599  {
6600  // Calculate optimal size for new block.
6601  VkDeviceSize newBlockSize = m_PreferredBlockSize;
6602  uint32_t newBlockSizeShift = 0;
6603  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
6604 
6605  // Allocating blocks of other sizes is allowed only in default pools.
6606  // In custom pools block size is fixed.
6607  if(m_IsCustomPool == false)
6608  {
6609  // Allocate 1/8, 1/4, 1/2 as first blocks.
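 // Example: with m_PreferredBlockSize = 256 MiB and no existing blocks, a small
 // allocation gets a first block of 32 MiB; later blocks then grow to 64, 128,
 // and finally 256 MiB as the pool fills up.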
6610  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
6611  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
6612  {
6613  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
6614  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= vkMemReq.size * 2)
6615  {
6616  newBlockSize = smallerNewBlockSize;
6617  ++newBlockSizeShift;
6618  }
6619  else
6620  {
6621  break;
6622  }
6623  }
6624  }
6625 
6626  size_t newBlockIndex = 0;
6627  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
6628  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
6629  if(m_IsCustomPool == false)
6630  {
6631  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
6632  {
6633  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
6634  if(smallerNewBlockSize >= vkMemReq.size)
6635  {
6636  newBlockSize = smallerNewBlockSize;
6637  ++newBlockSizeShift;
6638  res = CreateBlock(newBlockSize, &newBlockIndex);
6639  }
6640  else
6641  {
6642  break;
6643  }
6644  }
6645  }
6646 
6647  if(res == VK_SUCCESS)
6648  {
6649  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
6650  VMA_ASSERT(pBlock->m_Metadata.GetSize() >= vkMemReq.size);
6651 
6652  if(mapped)
6653  {
6654  res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
6655  if(res != VK_SUCCESS)
6656  {
6657  return res;
6658  }
6659  }
6660 
6661  // Allocate from pBlock. Because it is empty, allocRequest can be trivially filled.
6662  VmaAllocationRequest allocRequest;
6663  pBlock->m_Metadata.CreateFirstAllocationRequest(&allocRequest);
6664  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
6665  pBlock->m_Metadata.Alloc(allocRequest, suballocType, vkMemReq.size, *pAllocation);
6666  (*pAllocation)->InitBlockAllocation(
6667  hCurrentPool,
6668  pBlock,
6669  allocRequest.offset,
6670  vkMemReq.alignment,
6671  vkMemReq.size,
6672  suballocType,
6673  mapped,
6674  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
6675  VMA_HEAVY_ASSERT(pBlock->Validate());
6676  VMA_DEBUG_LOG(" Created new allocation Size=%llu", newBlockSize);
6677  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
6678  return VK_SUCCESS;
6679  }
6680  }
6681 
6682  const bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
6683 
6684  // 3. Try to allocate from existing blocks with making other allocations lost.
6685  if(canMakeOtherLost)
6686  {
6687  uint32_t tryIndex = 0;
6688  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
6689  {
6690  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
6691  VmaAllocationRequest bestRequest = {};
6692  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
6693 
6694  // 1. Search existing allocations.
6695  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
6696  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
6697  {
6698  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
6699  VMA_ASSERT(pCurrBlock);
6700  VmaAllocationRequest currRequest = {};
6701  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
6702  currentFrameIndex,
6703  m_FrameInUseCount,
6704  m_BufferImageGranularity,
6705  vkMemReq.size,
6706  vkMemReq.alignment,
6707  suballocType,
6708  canMakeOtherLost,
6709  &currRequest))
6710  {
6711  const VkDeviceSize currRequestCost = currRequest.CalcCost();
6712  if(pBestRequestBlock == VMA_NULL ||
6713  currRequestCost < bestRequestCost)
6714  {
6715  pBestRequestBlock = pCurrBlock;
6716  bestRequest = currRequest;
6717  bestRequestCost = currRequestCost;
6718 
6719  if(bestRequestCost == 0)
6720  {
6721  break;
6722  }
6723  }
6724  }
6725  }
6726 
6727  if(pBestRequestBlock != VMA_NULL)
6728  {
6729  if(mapped)
6730  {
6731  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
6732  if(res != VK_SUCCESS)
6733  {
6734  return res;
6735  }
6736  }
6737 
6738  if(pBestRequestBlock->m_Metadata.MakeRequestedAllocationsLost(
6739  currentFrameIndex,
6740  m_FrameInUseCount,
6741  &bestRequest))
6742  {
6743  // This block is about to be used, so if it was the empty one, the vector no longer has an empty block.
6744  if(pBestRequestBlock->m_Metadata.IsEmpty())
6745  {
6746  m_HasEmptyBlock = false;
6747  }
6748  // Allocate from this pBlock.
6749  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
6750  pBestRequestBlock->m_Metadata.Alloc(bestRequest, suballocType, vkMemReq.size, *pAllocation);
6751  (*pAllocation)->InitBlockAllocation(
6752  hCurrentPool,
6753  pBestRequestBlock,
6754  bestRequest.offset,
6755  vkMemReq.alignment,
6756  vkMemReq.size,
6757  suballocType,
6758  mapped,
6759  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
6760  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
6761  VMA_DEBUG_LOG(" Returned from existing block");
6762  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
6763  return VK_SUCCESS;
6764  }
6765  // else: Some allocations must have been touched while we are here. Next try.
6766  }
6767  else
6768  {
6769  // Could not find place in any of the blocks - break outer loop.
6770  break;
6771  }
6772  }
6773  /* Maximum number of tries exceeded - a very unlikely event that can happen when
6774  many other threads are simultaneously touching allocations, making it impossible
6775  to make them lost at the same time as we try to allocate. */
6776  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
6777  {
6778  return VK_ERROR_TOO_MANY_OBJECTS;
6779  }
6780  }
6781 
6782  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6783 }
6784 
6785 void VmaBlockVector::Free(
6786  VmaAllocation hAllocation)
6787 {
6788  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
6789 
6790  // Scope for lock.
6791  {
6792  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6793 
6794  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
6795 
6796  if(hAllocation->IsPersistentMap())
6797  {
6798  pBlock->Unmap(m_hAllocator, 1);
6799  }
6800 
6801  pBlock->m_Metadata.Free(hAllocation);
6802  VMA_HEAVY_ASSERT(pBlock->Validate());
6803 
6804  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
6805 
6806  // pBlock became empty after this deallocation.
6807  if(pBlock->m_Metadata.IsEmpty())
6808  {
6809  // The vector already has an empty block. We don't want two, so delete this one.
6810  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
6811  {
6812  pBlockToDelete = pBlock;
6813  Remove(pBlock);
6814  }
6815  // We now have our first empty block.
6816  else
6817  {
6818  m_HasEmptyBlock = true;
6819  }
6820  }
6821  // pBlock didn't become empty, but we have another empty block - find and free that one.
6822  // (This is an optional heuristic.)
6823  else if(m_HasEmptyBlock)
6824  {
6825  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
6826  if(pLastBlock->m_Metadata.IsEmpty() && m_Blocks.size() > m_MinBlockCount)
6827  {
6828  pBlockToDelete = pLastBlock;
6829  m_Blocks.pop_back();
6830  m_HasEmptyBlock = false;
6831  }
6832  }
6833 
6834  IncrementallySortBlocks();
6835  }
6836 
6837  // Destruction of an empty block. Deferred until this point, outside of the
6838  // mutex lock, for performance reasons.
6839  if(pBlockToDelete != VMA_NULL)
6840  {
6841  VMA_DEBUG_LOG(" Deleted empty allocation");
6842  pBlockToDelete->Destroy(m_hAllocator);
6843  vma_delete(m_hAllocator, pBlockToDelete);
6844  }
6845 }
6846 
6847 size_t VmaBlockVector::CalcMaxBlockSize() const
6848 {
6849  size_t result = 0;
6850  for(size_t i = m_Blocks.size(); i--; )
6851  {
6852  result = VMA_MAX((uint64_t)result, (uint64_t)m_Blocks[i]->m_Metadata.GetSize());
6853  if(result >= m_PreferredBlockSize)
6854  {
6855  break;
6856  }
6857  }
6858  return result;
6859 }
6860 
6861 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
6862 {
6863  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6864  {
6865  if(m_Blocks[blockIndex] == pBlock)
6866  {
6867  VmaVectorRemove(m_Blocks, blockIndex);
6868  return;
6869  }
6870  }
6871  VMA_ASSERT(0);
6872 }
6873 
6874 void VmaBlockVector::IncrementallySortBlocks()
6875 {
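 // A single bounded bubble-sort pass per call keeps m_Blocks approximately sorted
 // by ascending sum of free space, so the forward scan in Allocate() tries the
 // fullest blocks first. At most one swap is performed per call.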
6876  // Bubble sort only until first swap.
6877  for(size_t i = 1; i < m_Blocks.size(); ++i)
6878  {
6879  if(m_Blocks[i - 1]->m_Metadata.GetSumFreeSize() > m_Blocks[i]->m_Metadata.GetSumFreeSize())
6880  {
6881  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
6882  return;
6883  }
6884  }
6885 }
6886 
6887 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
6888 {
6889  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
6890  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
6891  allocInfo.allocationSize = blockSize;
6892  VkDeviceMemory mem = VK_NULL_HANDLE;
6893  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
6894  if(res < 0)
6895  {
6896  return res;
6897  }
6898 
6899  // New VkDeviceMemory successfully created.
6900 
6901  // Create a new block object for it.
6902  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
6903  pBlock->Init(
6904  m_MemoryTypeIndex,
6905  mem,
6906  allocInfo.allocationSize);
6907 
6908  m_Blocks.push_back(pBlock);
6909  if(pNewBlockIndex != VMA_NULL)
6910  {
6911  *pNewBlockIndex = m_Blocks.size() - 1;
6912  }
6913 
6914  return VK_SUCCESS;
6915 }
6916 
6917 #if VMA_STATS_STRING_ENABLED
6918 
6919 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
6920 {
6921  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6922 
6923  json.BeginObject();
6924 
6925  if(m_IsCustomPool)
6926  {
6927  json.WriteString("MemoryTypeIndex");
6928  json.WriteNumber(m_MemoryTypeIndex);
6929 
6930  json.WriteString("BlockSize");
6931  json.WriteNumber(m_PreferredBlockSize);
6932 
6933  json.WriteString("BlockCount");
6934  json.BeginObject(true);
6935  if(m_MinBlockCount > 0)
6936  {
6937  json.WriteString("Min");
6938  json.WriteNumber((uint64_t)m_MinBlockCount);
6939  }
6940  if(m_MaxBlockCount < SIZE_MAX)
6941  {
6942  json.WriteString("Max");
6943  json.WriteNumber((uint64_t)m_MaxBlockCount);
6944  }
6945  json.WriteString("Cur");
6946  json.WriteNumber((uint64_t)m_Blocks.size());
6947  json.EndObject();
6948 
6949  if(m_FrameInUseCount > 0)
6950  {
6951  json.WriteString("FrameInUseCount");
6952  json.WriteNumber(m_FrameInUseCount);
6953  }
6954  }
6955  else
6956  {
6957  json.WriteString("PreferredBlockSize");
6958  json.WriteNumber(m_PreferredBlockSize);
6959  }
6960 
6961  json.WriteString("Blocks");
6962  json.BeginArray();
6963  for(size_t i = 0; i < m_Blocks.size(); ++i)
6964  {
6965  m_Blocks[i]->m_Metadata.PrintDetailedMap(json);
6966  }
6967  json.EndArray();
6968 
6969  json.EndObject();
6970 }
6971 
6972 #endif // #if VMA_STATS_STRING_ENABLED
6973 
6974 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
6975  VmaAllocator hAllocator,
6976  uint32_t currentFrameIndex)
6977 {
6978  if(m_pDefragmentator == VMA_NULL)
6979  {
6980  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
6981  hAllocator,
6982  this,
6983  currentFrameIndex);
6984  }
6985 
6986  return m_pDefragmentator;
6987 }
6988 
6989 VkResult VmaBlockVector::Defragment(
6990  VmaDefragmentationStats* pDefragmentationStats,
6991  VkDeviceSize& maxBytesToMove,
6992  uint32_t& maxAllocationsToMove)
6993 {
6994  if(m_pDefragmentator == VMA_NULL)
6995  {
6996  return VK_SUCCESS;
6997  }
6998 
6999  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
7000 
7001  // Defragment.
7002  VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);
7003 
7004  // Accumulate statistics.
7005  if(pDefragmentationStats != VMA_NULL)
7006  {
7007  const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
7008  const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
7009  pDefragmentationStats->bytesMoved += bytesMoved;
7010  pDefragmentationStats->allocationsMoved += allocationsMoved;
7011  VMA_ASSERT(bytesMoved <= maxBytesToMove);
7012  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
7013  maxBytesToMove -= bytesMoved;
7014  maxAllocationsToMove -= allocationsMoved;
7015  }
7016 
7017  // Free empty blocks.
7018  m_HasEmptyBlock = false;
7019  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
7020  {
7021  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
7022  if(pBlock->m_Metadata.IsEmpty())
7023  {
7024  if(m_Blocks.size() > m_MinBlockCount)
7025  {
7026  if(pDefragmentationStats != VMA_NULL)
7027  {
7028  ++pDefragmentationStats->deviceMemoryBlocksFreed;
7029  pDefragmentationStats->bytesFreed += pBlock->m_Metadata.GetSize();
7030  }
7031 
7032  VmaVectorRemove(m_Blocks, blockIndex);
7033  pBlock->Destroy(m_hAllocator);
7034  vma_delete(m_hAllocator, pBlock);
7035  }
7036  else
7037  {
7038  m_HasEmptyBlock = true;
7039  }
7040  }
7041  }
7042 
7043  return result;
7044 }
7045 
7046 void VmaBlockVector::DestroyDefragmentator()
7047 {
7048  if(m_pDefragmentator != VMA_NULL)
7049  {
7050  vma_delete(m_hAllocator, m_pDefragmentator);
7051  m_pDefragmentator = VMA_NULL;
7052  }
7053 }
7054 
7055 void VmaBlockVector::MakePoolAllocationsLost(
7056  uint32_t currentFrameIndex,
7057  size_t* pLostAllocationCount)
7058 {
7059  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
7060  size_t lostAllocationCount = 0;
7061  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
7062  {
7063  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
7064  VMA_ASSERT(pBlock);
7065  lostAllocationCount += pBlock->m_Metadata.MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
7066  }
7067  if(pLostAllocationCount != VMA_NULL)
7068  {
7069  *pLostAllocationCount = lostAllocationCount;
7070  }
7071 }
7072 
7073 void VmaBlockVector::AddStats(VmaStats* pStats)
7074 {
7075  const uint32_t memTypeIndex = m_MemoryTypeIndex;
7076  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
7077 
7078  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
7079 
7080  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
7081  {
7082  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
7083  VMA_ASSERT(pBlock);
7084  VMA_HEAVY_ASSERT(pBlock->Validate());
7085  VmaStatInfo allocationStatInfo;
7086  pBlock->m_Metadata.CalcAllocationStatInfo(allocationStatInfo);
7087  VmaAddStatInfo(pStats->total, allocationStatInfo);
7088  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
7089  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
7090  }
7091 }
7092 
7093 ////////////////////////////////////////////////////////////////////////////////
7094 // VmaDefragmentator members definition
7095 
7096 VmaDefragmentator::VmaDefragmentator(
7097  VmaAllocator hAllocator,
7098  VmaBlockVector* pBlockVector,
7099  uint32_t currentFrameIndex) :
7100  m_hAllocator(hAllocator),
7101  m_pBlockVector(pBlockVector),
7102  m_CurrentFrameIndex(currentFrameIndex),
7103  m_BytesMoved(0),
7104  m_AllocationsMoved(0),
7105  m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
7106  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
7107 {
7108 }
7109 
7110 VmaDefragmentator::~VmaDefragmentator()
7111 {
7112  for(size_t i = m_Blocks.size(); i--; )
7113  {
7114  vma_delete(m_hAllocator, m_Blocks[i]);
7115  }
7116 }
7117 
7118 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
7119 {
7120  AllocationInfo allocInfo;
7121  allocInfo.m_hAllocation = hAlloc;
7122  allocInfo.m_pChanged = pChanged;
7123  m_Allocations.push_back(allocInfo);
7124 }
7125 
7126 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
7127 {
7128  // It has already been mapped for defragmentation.
7129  if(m_pMappedDataForDefragmentation)
7130  {
7131  *ppMappedData = m_pMappedDataForDefragmentation;
7132  return VK_SUCCESS;
7133  }
7134 
7135  // The block is already mapped by someone else - reuse that mapping.
7136  if(m_pBlock->GetMappedData())
7137  {
7138  *ppMappedData = m_pBlock->GetMappedData();
7139  return VK_SUCCESS;
7140  }
7141 
7142  // Map on first usage.
7143  VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
7144  *ppMappedData = m_pMappedDataForDefragmentation;
7145  return res;
7146 }
7147 
7148 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
7149 {
7150  if(m_pMappedDataForDefragmentation != VMA_NULL)
7151  {
7152  m_pBlock->Unmap(hAllocator, 1);
7153  }
7154 }
7155 
7156 VkResult VmaDefragmentator::DefragmentRound(
7157  VkDeviceSize maxBytesToMove,
7158  uint32_t maxAllocationsToMove)
7159 {
7160  if(m_Blocks.empty())
7161  {
7162  return VK_SUCCESS;
7163  }
7164 
7165  size_t srcBlockIndex = m_Blocks.size() - 1;
7166  size_t srcAllocIndex = SIZE_MAX;
7167  for(;;)
7168  {
7169  // 1. Find next allocation to move.
7170  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
7171  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
7172  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
7173  {
7174  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
7175  {
7176  // Finished: no more allocations to process.
7177  if(srcBlockIndex == 0)
7178  {
7179  return VK_SUCCESS;
7180  }
7181  else
7182  {
7183  --srcBlockIndex;
7184  srcAllocIndex = SIZE_MAX;
7185  }
7186  }
7187  else
7188  {
7189  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
7190  }
7191  }
7192 
7193  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
7194  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
7195 
7196  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
7197  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
7198  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
7199  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
7200 
7201  // 2. Try to find new place for this allocation in preceding or current block.
7202  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
7203  {
7204  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
7205  VmaAllocationRequest dstAllocRequest;
7206  if(pDstBlockInfo->m_pBlock->m_Metadata.CreateAllocationRequest(
7207  m_CurrentFrameIndex,
7208  m_pBlockVector->GetFrameInUseCount(),
7209  m_pBlockVector->GetBufferImageGranularity(),
7210  size,
7211  alignment,
7212  suballocType,
7213  false, // canMakeOtherLost
7214  &dstAllocRequest) &&
7215  MoveMakesSense(
7216  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
7217  {
7218  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
7219 
7220  // Reached limit on number of allocations or bytes to move.
7221  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
7222  (m_BytesMoved + size > maxBytesToMove))
7223  {
7224  return VK_INCOMPLETE;
7225  }
7226 
7227  void* pDstMappedData = VMA_NULL;
7228  VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
7229  if(res != VK_SUCCESS)
7230  {
7231  return res;
7232  }
7233 
7234  void* pSrcMappedData = VMA_NULL;
7235  res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
7236  if(res != VK_SUCCESS)
7237  {
7238  return res;
7239  }
7240 
7241  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
7242  memcpy(
7243  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
7244  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
7245  static_cast<size_t>(size));
7246 
7247  pDstBlockInfo->m_pBlock->m_Metadata.Alloc(dstAllocRequest, suballocType, size, allocInfo.m_hAllocation);
7248  pSrcBlockInfo->m_pBlock->m_Metadata.FreeAtOffset(srcOffset);
7249 
7250  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
7251 
7252  if(allocInfo.m_pChanged != VMA_NULL)
7253  {
7254  *allocInfo.m_pChanged = VK_TRUE;
7255  }
7256 
7257  ++m_AllocationsMoved;
7258  m_BytesMoved += size;
7259 
7260  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
7261 
7262  break;
7263  }
7264  }
7265 
7266  // If not processed, this allocInfo remains in pSrcBlockInfo->m_Allocations for the next round.
7267 
7268  if(srcAllocIndex > 0)
7269  {
7270  --srcAllocIndex;
7271  }
7272  else
7273  {
7274  if(srcBlockIndex > 0)
7275  {
7276  --srcBlockIndex;
7277  srcAllocIndex = SIZE_MAX;
7278  }
7279  else
7280  {
7281  return VK_SUCCESS;
7282  }
7283  }
7284  }
7285 }
7286 
7287 VkResult VmaDefragmentator::Defragment(
7288  VkDeviceSize maxBytesToMove,
7289  uint32_t maxAllocationsToMove)
7290 {
7291  if(m_Allocations.empty())
7292  {
7293  return VK_SUCCESS;
7294  }
7295 
7296  // Create block info for each block.
7297  const size_t blockCount = m_pBlockVector->m_Blocks.size();
7298  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
7299  {
7300  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
7301  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
7302  m_Blocks.push_back(pBlockInfo);
7303  }
7304 
7305  // Sort them by m_pBlock pointer value.
7306  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
7307 
7308  // Move allocation infos from m_Allocations to m_Allocations of the matching entry in m_Blocks.
7309  for(size_t allocIndex = 0, allocCount = m_Allocations.size(); allocIndex < allocCount; ++allocIndex)
7310  {
7311  AllocationInfo& allocInfo = m_Allocations[allocIndex];
7312  // Now that we are inside VmaBlockVector::m_Mutex, we can make a final check whether this allocation was lost.
7313  if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
7314  {
7315  VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
7316  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
7317  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
7318  {
7319  (*it)->m_Allocations.push_back(allocInfo);
7320  }
7321  else
7322  {
7323  VMA_ASSERT(0);
7324  }
7325  }
7326  }
7327  m_Allocations.clear();
7328 
7329  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
7330  {
7331  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
7332  pBlockInfo->CalcHasNonMovableAllocations();
7333  pBlockInfo->SortAllocationsBySizeDescecnding();
7334  }
7335 
7336  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
7337  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
7338 
7339  // Execute defragmentation rounds (the main part).
7340  VkResult result = VK_SUCCESS;
7341  for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
7342  {
7343  result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
7344  }
7345 
7346  // Unmap blocks that were mapped for defragmentation.
7347  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
7348  {
7349  m_Blocks[blockIndex]->Unmap(m_hAllocator);
7350  }
7351 
7352  return result;
7353 }
7354 
7355 bool VmaDefragmentator::MoveMakesSense(
7356  size_t dstBlockIndex, VkDeviceSize dstOffset,
7357  size_t srcBlockIndex, VkDeviceSize srcOffset)
7358 {
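 // Candidate positions are ordered lexicographically by (blockIndex, offset):
 // a move makes sense only if it packs the allocation into an earlier block, or
 // to a smaller offset within the same block, compacting data toward the front.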
7359  if(dstBlockIndex < srcBlockIndex)
7360  {
7361  return true;
7362  }
7363  if(dstBlockIndex > srcBlockIndex)
7364  {
7365  return false;
7366  }
7367  if(dstOffset < srcOffset)
7368  {
7369  return true;
7370  }
7371  return false;
7372 }
7373 
7374 ////////////////////////////////////////////////////////////////////////////////
7375 // VmaAllocator_T
7376 
7377 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
7378  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
7379  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
7380  m_hDevice(pCreateInfo->device),
7381  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
7382  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
7383  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
7384  m_PreferredLargeHeapBlockSize(0),
7385  m_PhysicalDevice(pCreateInfo->physicalDevice),
7386  m_CurrentFrameIndex(0),
7387  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks()))
7388 {
7389  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
7390 
7391  memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
7392  memset(&m_MemProps, 0, sizeof(m_MemProps));
7393  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
7394 
7395  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
7396  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
7397 
7398  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
7399  {
7400  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
7401  }
7402 
7403  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
7404  {
7405  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
7406  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
7407  }
7408 
7409  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
7410 
7411  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
7412  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
7413 
7414  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
7415  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
7416 
7417  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
7418  {
7419  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
7420  {
7421  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
7422  if(limit != VK_WHOLE_SIZE)
7423  {
7424  m_HeapSizeLimit[heapIndex] = limit;
7425  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
7426  {
7427  m_MemProps.memoryHeaps[heapIndex].size = limit;
7428  }
7429  }
7430  }
7431  }
7432 
7433  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7434  {
7435  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
7436 
7437  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
7438  this,
7439  memTypeIndex,
7440  preferredBlockSize,
7441  0,
7442  SIZE_MAX,
7443  GetBufferImageGranularity(),
7444  pCreateInfo->frameInUseCount,
7445  false); // isCustomPool
7446  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
7447  // because minBlockCount is 0.
7448  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
7449  }
7450 }
7451 
7452 VmaAllocator_T::~VmaAllocator_T()
7453 {
7454  VMA_ASSERT(m_Pools.empty());
7455 
7456  for(size_t i = GetMemoryTypeCount(); i--; )
7457  {
7458  vma_delete(this, m_pDedicatedAllocations[i]);
7459  vma_delete(this, m_pBlockVectors[i]);
7460  }
7461 }
7462 
7463 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
7464 {
7465 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
7466  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
7467  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
7468  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
7469  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
7470  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
7471  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
7472  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
7473  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
7474  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
7475  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
7476  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
7477  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
7478  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
7479  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
7480  if(m_UseKhrDedicatedAllocation)
7481  {
7482  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
7483  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
7484  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
7485  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
7486  }
7487 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
7488 
7489 #define VMA_COPY_IF_NOT_NULL(funcName) \
7490  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
7491 
7492  if(pVulkanFunctions != VMA_NULL)
7493  {
7494  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
7495  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
7496  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
7497  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
7498  VMA_COPY_IF_NOT_NULL(vkMapMemory);
7499  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
7500  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
7501  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
7502  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
7503  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
7504  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
7505  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
7506  VMA_COPY_IF_NOT_NULL(vkCreateImage);
7507  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
7508  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
7509  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
7510  }
7511 
7512 #undef VMA_COPY_IF_NOT_NULL
7513 
7514  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
7515  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
7516  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
7517  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
7518  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
7519  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
7520  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
7521  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
7522  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
7523  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
7524  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
7525  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
7526  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
7527  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
7528  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
7529  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
7530  if(m_UseKhrDedicatedAllocation)
7531  {
7532  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
7533  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
7534  }
7535 }
7536 
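// Heuristic for the default VkDeviceMemory block size of a memory type
// (illustrative note): heaps no larger than VMA_SMALL_HEAP_MAX_SIZE get
// heapSize / 8 as block size (e.g. a 256 MiB heap yields 32 MiB blocks), while
// larger heaps use m_PreferredLargeHeapBlockSize, i.e.
// VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE unless overridden through
// VmaAllocatorCreateInfo::preferredLargeHeapBlockSize.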
7537 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
7538 {
7539  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
7540  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
7541  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
7542  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
7543 }
7544 
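// Allocation strategy for a single, already chosen memory type:
// 1. Prefer a dedicated VkDeviceMemory object if it was explicitly requested,
//    if the driver requires/prefers it, or if size > preferredBlockSize / 2.
// 2. Otherwise sub-allocate from this memory type's VmaBlockVector.
// 3. If block allocation fails, fall back to dedicated memory, unless
//    VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT forbids creating new blocks.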
7545 VkResult VmaAllocator_T::AllocateMemoryOfType(
7546  const VkMemoryRequirements& vkMemReq,
7547  bool dedicatedAllocation,
7548  VkBuffer dedicatedBuffer,
7549  VkImage dedicatedImage,
7550  const VmaAllocationCreateInfo& createInfo,
7551  uint32_t memTypeIndex,
7552  VmaSuballocationType suballocType,
7553  VmaAllocation* pAllocation)
7554 {
7555  VMA_ASSERT(pAllocation != VMA_NULL);
7556  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
7557 
7558  VmaAllocationCreateInfo finalCreateInfo = createInfo;
7559 
7560  // If memory type is not HOST_VISIBLE, disable MAPPED.
7561  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
7562  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
7563  {
7564  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
7565  }
7566 
7567  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
7568  VMA_ASSERT(blockVector);
7569 
7570  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
7571  bool preferDedicatedMemory =
7572  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
7573  dedicatedAllocation ||
7574  // Heuristics: Allocate dedicated memory if requested size is greater than half of preferred block size.
7575  vkMemReq.size > preferredBlockSize / 2;
7576 
7577  if(preferDedicatedMemory &&
7578  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
7579  finalCreateInfo.pool == VK_NULL_HANDLE)
7580  {
7581  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
7582  }
7583 
7584  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
7585  {
7586  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7587  {
7588  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7589  }
7590  else
7591  {
7592  return AllocateDedicatedMemory(
7593  vkMemReq.size,
7594  suballocType,
7595  memTypeIndex,
7596  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
7597  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
7598  finalCreateInfo.pUserData,
7599  dedicatedBuffer,
7600  dedicatedImage,
7601  pAllocation);
7602  }
7603  }
7604  else
7605  {
7606  VkResult res = blockVector->Allocate(
7607  VK_NULL_HANDLE, // hCurrentPool
7608  m_CurrentFrameIndex.load(),
7609  vkMemReq,
7610  finalCreateInfo,
7611  suballocType,
7612  pAllocation);
7613  if(res == VK_SUCCESS)
7614  {
7615  return res;
7616  }
7617 
7618  // Block allocation failed: Try dedicated memory as a fallback.
7619  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7620  {
7621  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7622  }
7623  else
7624  {
7625  res = AllocateDedicatedMemory(
7626  vkMemReq.size,
7627  suballocType,
7628  memTypeIndex,
7629  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
7630  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
7631  finalCreateInfo.pUserData,
7632  dedicatedBuffer,
7633  dedicatedImage,
7634  pAllocation);
7635  if(res == VK_SUCCESS)
7636  {
7637  // Succeeded: AllocateDedicatedMemory function already filled *pAllocation, nothing more to do here.
7638  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
7639  return VK_SUCCESS;
7640  }
7641  else
7642  {
7643  // Everything failed: Return error code.
7644  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
7645  return res;
7646  }
7647  }
7648  }
7649 }
7650 
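// Creates a dedicated VkDeviceMemory object for a single resource. When
// VK_KHR_dedicated_allocation is in use, VkMemoryDedicatedAllocateInfoKHR is
// chained into VkMemoryAllocateInfo::pNext so the driver can optimize the
// allocation for exactly one buffer or image.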
7651 VkResult VmaAllocator_T::AllocateDedicatedMemory(
7652  VkDeviceSize size,
7653  VmaSuballocationType suballocType,
7654  uint32_t memTypeIndex,
7655  bool map,
7656  bool isUserDataString,
7657  void* pUserData,
7658  VkBuffer dedicatedBuffer,
7659  VkImage dedicatedImage,
7660  VmaAllocation* pAllocation)
7661 {
7662  VMA_ASSERT(pAllocation);
7663 
7664  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
7665  allocInfo.memoryTypeIndex = memTypeIndex;
7666  allocInfo.allocationSize = size;
7667 
7668  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
7669  if(m_UseKhrDedicatedAllocation)
7670  {
7671  if(dedicatedBuffer != VK_NULL_HANDLE)
7672  {
7673  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
7674  dedicatedAllocInfo.buffer = dedicatedBuffer;
7675  allocInfo.pNext = &dedicatedAllocInfo;
7676  }
7677  else if(dedicatedImage != VK_NULL_HANDLE)
7678  {
7679  dedicatedAllocInfo.image = dedicatedImage;
7680  allocInfo.pNext = &dedicatedAllocInfo;
7681  }
7682  }
7683 
7684  // Allocate VkDeviceMemory.
7685  VkDeviceMemory hMemory = VK_NULL_HANDLE;
7686  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
7687  if(res < 0)
7688  {
7689  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
7690  return res;
7691  }
7692 
7693  void* pMappedData = VMA_NULL;
7694  if(map)
7695  {
7696  res = (*m_VulkanFunctions.vkMapMemory)(
7697  m_hDevice,
7698  hMemory,
7699  0,
7700  VK_WHOLE_SIZE,
7701  0,
7702  &pMappedData);
7703  if(res < 0)
7704  {
7705  VMA_DEBUG_LOG(" vkMapMemory FAILED");
7706  FreeVulkanMemory(memTypeIndex, size, hMemory);
7707  return res;
7708  }
7709  }
7710 
7711  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
7712  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
7713  (*pAllocation)->SetUserData(this, pUserData);
7714 
7715  // Register it in m_pDedicatedAllocations.
7716  {
7717  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
7718  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
7719  VMA_ASSERT(pDedicatedAllocations);
7720  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
7721  }
7722 
7723  VMA_DEBUG_LOG(" Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);
7724 
7725  return VK_SUCCESS;
7726 }
7727 
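// With VK_KHR_dedicated_allocation enabled, memory requirements are queried
// through vkGetBufferMemoryRequirements2KHR with VkMemoryDedicatedRequirementsKHR
// chained in, which additionally reports whether the driver requires or merely
// prefers a dedicated allocation for this buffer.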
7728 void VmaAllocator_T::GetBufferMemoryRequirements(
7729  VkBuffer hBuffer,
7730  VkMemoryRequirements& memReq,
7731  bool& requiresDedicatedAllocation,
7732  bool& prefersDedicatedAllocation) const
7733 {
7734  if(m_UseKhrDedicatedAllocation)
7735  {
7736  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
7737  memReqInfo.buffer = hBuffer;
7738 
7739  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
7740 
7741  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
7742  memReq2.pNext = &memDedicatedReq;
7743 
7744  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
7745 
7746  memReq = memReq2.memoryRequirements;
7747  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
7748  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
7749  }
7750  else
7751  {
7752  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
7753  requiresDedicatedAllocation = false;
7754  prefersDedicatedAllocation = false;
7755  }
7756 }
7757 
7758 void VmaAllocator_T::GetImageMemoryRequirements(
7759  VkImage hImage,
7760  VkMemoryRequirements& memReq,
7761  bool& requiresDedicatedAllocation,
7762  bool& prefersDedicatedAllocation) const
7763 {
7764  if(m_UseKhrDedicatedAllocation)
7765  {
7766  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
7767  memReqInfo.image = hImage;
7768 
7769  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
7770 
7771  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
7772  memReq2.pNext = &memDedicatedReq;
7773 
7774  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
7775 
7776  memReq = memReq2.memoryRequirements;
7777  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
7778  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
7779  }
7780  else
7781  {
7782  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
7783  requiresDedicatedAllocation = false;
7784  prefersDedicatedAllocation = false;
7785  }
7786 }
7787 
7788 VkResult VmaAllocator_T::AllocateMemory(
7789  const VkMemoryRequirements& vkMemReq,
7790  bool requiresDedicatedAllocation,
7791  bool prefersDedicatedAllocation,
7792  VkBuffer dedicatedBuffer,
7793  VkImage dedicatedImage,
7794  const VmaAllocationCreateInfo& createInfo,
7795  VmaSuballocationType suballocType,
7796  VmaAllocation* pAllocation)
7797 {
7798  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
7799  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7800  {
7801  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
7802  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7803  }
7804  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
7805  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
7806  {
7807  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
7808  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7809  }
7810  if(requiresDedicatedAllocation)
7811  {
7812  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7813  {
7814  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
7815  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7816  }
7817  if(createInfo.pool != VK_NULL_HANDLE)
7818  {
7819  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
7820  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7821  }
7822  }
7823  if((createInfo.pool != VK_NULL_HANDLE) &&
7824  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
7825  {
7826  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
7827  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7828  }
7829 
7830  if(createInfo.pool != VK_NULL_HANDLE)
7831  {
7832  return createInfo.pool->m_BlockVector.Allocate(
7833  createInfo.pool,
7834  m_CurrentFrameIndex.load(),
7835  vkMemReq,
7836  createInfo,
7837  suballocType,
7838  pAllocation);
7839  }
7840  else
7841  {
7842  // Bit mask of Vulkan memory types acceptable for this allocation.
7843  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
7844  uint32_t memTypeIndex = UINT32_MAX;
7845  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
7846  if(res == VK_SUCCESS)
7847  {
7848  res = AllocateMemoryOfType(
7849  vkMemReq,
7850  requiresDedicatedAllocation || prefersDedicatedAllocation,
7851  dedicatedBuffer,
7852  dedicatedImage,
7853  createInfo,
7854  memTypeIndex,
7855  suballocType,
7856  pAllocation);
7857  // Succeeded on first try.
7858  if(res == VK_SUCCESS)
7859  {
7860  return res;
7861  }
7862  // Allocation from this memory type failed. Try other compatible memory types.
7863  else
7864  {
7865  for(;;)
7866  {
7867  // Remove old memTypeIndex from list of possibilities.
7868  memoryTypeBits &= ~(1u << memTypeIndex);
7869  // Find alternative memTypeIndex.
7870  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
7871  if(res == VK_SUCCESS)
7872  {
7873  res = AllocateMemoryOfType(
7874  vkMemReq,
7875  requiresDedicatedAllocation || prefersDedicatedAllocation,
7876  dedicatedBuffer,
7877  dedicatedImage,
7878  createInfo,
7879  memTypeIndex,
7880  suballocType,
7881  pAllocation);
7882  // Allocation from this alternative memory type succeeded.
7883  if(res == VK_SUCCESS)
7884  {
7885  return res;
7886  }
7887  // else: Allocation from this memory type failed. Try next one - next loop iteration.
7888  }
7889  // No other matching memory type index could be found.
7890  else
7891  {
7892  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
7893  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7894  }
7895  }
7896  }
7897  }
7898  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
7899  else
7900  return res;
7901  }
7902 }
7903 
7904 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
7905 {
7906  VMA_ASSERT(allocation);
7907 
7908  if(allocation->CanBecomeLost() == false ||
7909  allocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
7910  {
7911  switch(allocation->GetType())
7912  {
7913  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
7914  {
7915  VmaBlockVector* pBlockVector = VMA_NULL;
7916  VmaPool hPool = allocation->GetPool();
7917  if(hPool != VK_NULL_HANDLE)
7918  {
7919  pBlockVector = &hPool->m_BlockVector;
7920  }
7921  else
7922  {
7923  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
7924  pBlockVector = m_pBlockVectors[memTypeIndex];
7925  }
7926  pBlockVector->Free(allocation);
7927  }
7928  break;
7929  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
7930  FreeDedicatedMemory(allocation);
7931  break;
7932  default:
7933  VMA_ASSERT(0);
7934  }
7935  }
7936 
7937  allocation->SetUserData(this, VMA_NULL);
7938  vma_delete(this, allocation);
7939 }
7940 
7941 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
7942 {
7943  // Initialize.
7944  InitStatInfo(pStats->total);
7945  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
7946  InitStatInfo(pStats->memoryType[i]);
7947  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
7948  InitStatInfo(pStats->memoryHeap[i]);
7949 
7950  // Process default pools.
7951  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7952  {
7953  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
7954  VMA_ASSERT(pBlockVector);
7955  pBlockVector->AddStats(pStats);
7956  }
7957 
7958  // Process custom pools.
7959  {
7960  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7961  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
7962  {
7963  m_Pools[poolIndex]->GetBlockVector().AddStats(pStats);
7964  }
7965  }
7966 
7967  // Process dedicated allocations.
7968  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7969  {
7970  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
7971  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
7972  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
7973  VMA_ASSERT(pDedicatedAllocVector);
7974  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
7975  {
7976  VmaStatInfo allocationStatInfo;
7977  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
7978  VmaAddStatInfo(pStats->total, allocationStatInfo);
7979  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
7980  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
7981  }
7982  }
7983 
7984  // Postprocess.
7985  VmaPostprocessCalcStatInfo(pStats->total);
7986  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
7987  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
7988  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
7989  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
7990 }
7991 
7992 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
7993 
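// Defragmentation outline: allocations passed by the user are dispatched to
// per-VmaBlockVector defragmentators (created on demand), each block vector is
// then compacted within the limits given in VmaDefragmentationInfo, and
// finally the defragmentators are destroyed. Only HOST_VISIBLE, non-lost block
// allocations are eligible; dedicated allocations are never moved.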
7994 VkResult VmaAllocator_T::Defragment(
7995  VmaAllocation* pAllocations,
7996  size_t allocationCount,
7997  VkBool32* pAllocationsChanged,
7998  const VmaDefragmentationInfo* pDefragmentationInfo,
7999  VmaDefragmentationStats* pDefragmentationStats)
8000 {
8001  if(pAllocationsChanged != VMA_NULL)
8002  {
8003  memset(pAllocationsChanged, 0, allocationCount * sizeof(*pAllocationsChanged));
8004  }
8005  if(pDefragmentationStats != VMA_NULL)
8006  {
8007  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
8008  }
8009 
8010  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
8011 
8012  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
8013 
8014  const size_t poolCount = m_Pools.size();
8015 
8016  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
8017  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
8018  {
8019  VmaAllocation hAlloc = pAllocations[allocIndex];
8020  VMA_ASSERT(hAlloc);
8021  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
8022  // DedicatedAlloc cannot be defragmented.
8023  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
8024  // Only HOST_VISIBLE memory types can be defragmented.
8025  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) &&
8026  // Lost allocation cannot be defragmented.
8027  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
8028  {
8029  VmaBlockVector* pAllocBlockVector = VMA_NULL;
8030 
8031  const VmaPool hAllocPool = hAlloc->GetPool();
8032  // This allocation belongs to custom pool.
8033  if(hAllocPool != VK_NULL_HANDLE)
8034  {
8035  pAllocBlockVector = &hAllocPool->GetBlockVector();
8036  }
8037  // This allocation belongs to general pool.
8038  else
8039  {
8040  pAllocBlockVector = m_pBlockVectors[memTypeIndex];
8041  }
8042 
8043  VmaDefragmentator* const pDefragmentator = pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
8044 
8045  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
8046  &pAllocationsChanged[allocIndex] : VMA_NULL;
8047  pDefragmentator->AddAllocation(hAlloc, pChanged);
8048  }
8049  }
8050 
8051  VkResult result = VK_SUCCESS;
8052 
8053  // ======== Main processing.
8054 
8055  VkDeviceSize maxBytesToMove = SIZE_MAX;
8056  uint32_t maxAllocationsToMove = UINT32_MAX;
8057  if(pDefragmentationInfo != VMA_NULL)
8058  {
8059  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
8060  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
8061  }
8062 
8063  // Process standard memory.
8064  for(uint32_t memTypeIndex = 0;
8065  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
8066  ++memTypeIndex)
8067  {
8068  // Only HOST_VISIBLE memory types can be defragmented.
8069  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
8070  {
8071  result = m_pBlockVectors[memTypeIndex]->Defragment(
8072  pDefragmentationStats,
8073  maxBytesToMove,
8074  maxAllocationsToMove);
8075  }
8076  }
8077 
8078  // Process custom pools.
8079  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
8080  {
8081  result = m_Pools[poolIndex]->GetBlockVector().Defragment(
8082  pDefragmentationStats,
8083  maxBytesToMove,
8084  maxAllocationsToMove);
8085  }
8086 
8087  // ======== Destroy defragmentators.
8088 
8089  // Process custom pools.
8090  for(size_t poolIndex = poolCount; poolIndex--; )
8091  {
8092  m_Pools[poolIndex]->GetBlockVector().DestroyDefragmentator();
8093  }
8094 
8095  // Process standard memory.
8096  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
8097  {
8098  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
8099  {
8100  m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
8101  }
8102  }
8103 
8104  return result;
8105 }
8106 
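// For allocations that can become lost, the last-use frame index is advanced
// to the current frame with a compare-exchange loop, so concurrent callers and
// vmaSetCurrentFrameIndex never race: the loop retries until it either
// observes VMA_FRAME_INDEX_LOST, sees the index already equal to the current
// frame, or successfully publishes the new value.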
8107 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
8108 {
8109  if(hAllocation->CanBecomeLost())
8110  {
8111  /*
8112  Warning: This is a carefully designed algorithm.
8113  Do not modify unless you really know what you're doing :)
8114  */
8115  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
8116  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
8117  for(;;)
8118  {
8119  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
8120  {
8121  pAllocationInfo->memoryType = UINT32_MAX;
8122  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
8123  pAllocationInfo->offset = 0;
8124  pAllocationInfo->size = hAllocation->GetSize();
8125  pAllocationInfo->pMappedData = VMA_NULL;
8126  pAllocationInfo->pUserData = hAllocation->GetUserData();
8127  return;
8128  }
8129  else if(localLastUseFrameIndex == localCurrFrameIndex)
8130  {
8131  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
8132  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
8133  pAllocationInfo->offset = hAllocation->GetOffset();
8134  pAllocationInfo->size = hAllocation->GetSize();
8135  pAllocationInfo->pMappedData = VMA_NULL;
8136  pAllocationInfo->pUserData = hAllocation->GetUserData();
8137  return;
8138  }
8139  else // Last use time earlier than current time.
8140  {
8141  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
8142  {
8143  localLastUseFrameIndex = localCurrFrameIndex;
8144  }
8145  }
8146  }
8147  }
8148  else
8149  {
8150  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
8151  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
8152  pAllocationInfo->offset = hAllocation->GetOffset();
8153  pAllocationInfo->size = hAllocation->GetSize();
8154  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
8155  pAllocationInfo->pUserData = hAllocation->GetUserData();
8156  }
8157 }
8158 
8159 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
8160 {
8161  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
8162  if(hAllocation->CanBecomeLost())
8163  {
8164  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
8165  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
8166  for(;;)
8167  {
8168  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
8169  {
8170  return false;
8171  }
8172  else if(localLastUseFrameIndex == localCurrFrameIndex)
8173  {
8174  return true;
8175  }
8176  else // Last use time earlier than current time.
8177  {
8178  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
8179  {
8180  localLastUseFrameIndex = localCurrFrameIndex;
8181  }
8182  }
8183  }
8184  }
8185  else
8186  {
8187  return true;
8188  }
8189 }
8190 
8191 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
8192 {
8193  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u", pCreateInfo->memoryTypeIndex);
8194 
8195  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
8196 
8197  if(newCreateInfo.maxBlockCount == 0)
8198  {
8199  newCreateInfo.maxBlockCount = SIZE_MAX;
8200  }
8201  if(newCreateInfo.blockSize == 0)
8202  {
8203  newCreateInfo.blockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
8204  }
8205 
8206  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo);
8207 
8208  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
8209  if(res != VK_SUCCESS)
8210  {
8211  vma_delete(this, *pPool);
8212  *pPool = VMA_NULL;
8213  return res;
8214  }
8215 
8216  // Add to m_Pools.
8217  {
8218  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
8219  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
8220  }
8221 
8222  return VK_SUCCESS;
8223 }
8224 
8225 void VmaAllocator_T::DestroyPool(VmaPool pool)
8226 {
8227  // Remove from m_Pools.
8228  {
8229  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
8230  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
8231  VMA_ASSERT(success && "Pool not found in Allocator.");
8232  }
8233 
8234  vma_delete(this, pool);
8235 }
8236 
8237 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
8238 {
8239  pool->m_BlockVector.GetPoolStats(pPoolStats);
8240 }
8241 
8242 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
8243 {
8244  m_CurrentFrameIndex.store(frameIndex);
8245 }
8246 
8247 void VmaAllocator_T::MakePoolAllocationsLost(
8248  VmaPool hPool,
8249  size_t* pLostAllocationCount)
8250 {
8251  hPool->m_BlockVector.MakePoolAllocationsLost(
8252  m_CurrentFrameIndex.load(),
8253  pLostAllocationCount);
8254 }
8255 
8256 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
8257 {
8258  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
8259  (*pAllocation)->InitLost();
8260 }
8261 
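// Central wrapper over vkAllocateMemory. If VmaAllocatorCreateInfo::pHeapSizeLimit
// set a budget for this heap, the remaining budget is checked and decreased
// under m_HeapSizeLimitMutex and VK_ERROR_OUT_OF_DEVICE_MEMORY is returned when
// the request would exceed it; FreeVulkanMemory returns the bytes to the
// budget. User callbacks from VmaDeviceMemoryCallbacks are invoked on success.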
8262 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
8263 {
8264  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
8265 
8266  VkResult res;
8267  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
8268  {
8269  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
8270  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
8271  {
8272  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
8273  if(res == VK_SUCCESS)
8274  {
8275  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
8276  }
8277  }
8278  else
8279  {
8280  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
8281  }
8282  }
8283  else
8284  {
8285  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
8286  }
8287 
8288  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
8289  {
8290  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
8291  }
8292 
8293  return res;
8294 }
8295 
8296 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
8297 {
8298  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
8299  {
8300  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
8301  }
8302 
8303  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
8304 
8305  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
8306  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
8307  {
8308  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
8309  m_HeapSizeLimit[heapIndex] += size;
8310  }
8311 }
8312 
8313 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
8314 {
8315  if(hAllocation->CanBecomeLost())
8316  {
8317  return VK_ERROR_MEMORY_MAP_FAILED;
8318  }
8319 
8320  switch(hAllocation->GetType())
8321  {
8322  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
8323  {
8324  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
8325  char *pBytes = VMA_NULL;
8326  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
8327  if(res == VK_SUCCESS)
8328  {
8329  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
8330  hAllocation->BlockAllocMap();
8331  }
8332  return res;
8333  }
8334  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
8335  return hAllocation->DedicatedAllocMap(this, ppData);
8336  default:
8337  VMA_ASSERT(0);
8338  return VK_ERROR_MEMORY_MAP_FAILED;
8339  }
8340 }
8341 
8342 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
8343 {
8344  switch(hAllocation->GetType())
8345  {
8346  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
8347  {
8348  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
8349  hAllocation->BlockAllocUnmap();
8350  pBlock->Unmap(this, 1);
8351  }
8352  break;
8353  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
8354  hAllocation->DedicatedAllocUnmap(this);
8355  break;
8356  default:
8357  VMA_ASSERT(0);
8358  }
8359 }
8360 
8361 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
8362 {
8363  VkResult res = VK_SUCCESS;
8364  switch(hAllocation->GetType())
8365  {
8366  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
8367  res = GetVulkanFunctions().vkBindBufferMemory(
8368  m_hDevice,
8369  hBuffer,
8370  hAllocation->GetMemory(),
8371  0); //memoryOffset
8372  break;
8373  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
8374  {
8375  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
8376  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
8377  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
8378  break;
8379  }
8380  default:
8381  VMA_ASSERT(0);
8382  }
8383  return res;
8384 }
8385 
8386 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
8387 {
8388  VkResult res = VK_SUCCESS;
8389  switch(hAllocation->GetType())
8390  {
8391  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
8392  res = GetVulkanFunctions().vkBindImageMemory(
8393  m_hDevice,
8394  hImage,
8395  hAllocation->GetMemory(),
8396  0); //memoryOffset
8397  break;
8398  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
8399  {
8400  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
8401  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
8402  res = pBlock->BindImageMemory(this, hAllocation, hImage);
8403  break;
8404  }
8405  default:
8406  VMA_ASSERT(0);
8407  }
8408  return res;
8409 }
8410 
8411 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
8412 {
8413  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
8414 
8415  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
8416  {
8417  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
8418  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
8419  VMA_ASSERT(pDedicatedAllocations);
8420  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
8421  VMA_ASSERT(success);
8422  }
8423 
8424  VkDeviceMemory hMemory = allocation->GetMemory();
8425 
8426  if(allocation->GetMappedData() != VMA_NULL)
8427  {
8428  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
8429  }
8430 
8431  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
8432 
8433  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
8434 }
8435 
8436 #if VMA_STATS_STRING_ENABLED
8437 
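// Emits the "detailed map" part of the stats JSON: a "DedicatedAllocations"
// object keyed by "Type <index>", a "DefaultPools" object for the
// per-memory-type block vectors, and a "Pools" array for custom pools.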
8438 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
8439 {
8440  bool dedicatedAllocationsStarted = false;
8441  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
8442  {
8443  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
8444  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
8445  VMA_ASSERT(pDedicatedAllocVector);
8446  if(pDedicatedAllocVector->empty() == false)
8447  {
8448  if(dedicatedAllocationsStarted == false)
8449  {
8450  dedicatedAllocationsStarted = true;
8451  json.WriteString("DedicatedAllocations");
8452  json.BeginObject();
8453  }
8454 
8455  json.BeginString("Type ");
8456  json.ContinueString(memTypeIndex);
8457  json.EndString();
8458 
8459  json.BeginArray();
8460 
8461  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
8462  {
8463  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
8464  json.BeginObject(true);
8465 
8466  json.WriteString("Type");
8467  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[hAlloc->GetSuballocationType()]);
8468 
8469  json.WriteString("Size");
8470  json.WriteNumber(hAlloc->GetSize());
8471 
8472  const void* pUserData = hAlloc->GetUserData();
8473  if(pUserData != VMA_NULL)
8474  {
8475  json.WriteString("UserData");
8476  if(hAlloc->IsUserDataString())
8477  {
8478  json.WriteString((const char*)pUserData);
8479  }
8480  else
8481  {
8482  json.BeginString();
8483  json.ContinueString_Pointer(pUserData);
8484  json.EndString();
8485  }
8486  }
8487 
8488  json.EndObject();
8489  }
8490 
8491  json.EndArray();
8492  }
8493  }
8494  if(dedicatedAllocationsStarted)
8495  {
8496  json.EndObject();
8497  }
8498 
8499  {
8500  bool allocationsStarted = false;
8501  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
8502  {
8503  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
8504  {
8505  if(allocationsStarted == false)
8506  {
8507  allocationsStarted = true;
8508  json.WriteString("DefaultPools");
8509  json.BeginObject();
8510  }
8511 
8512  json.BeginString("Type ");
8513  json.ContinueString(memTypeIndex);
8514  json.EndString();
8515 
8516  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
8517  }
8518  }
8519  if(allocationsStarted)
8520  {
8521  json.EndObject();
8522  }
8523  }
8524 
8525  {
8526  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
8527  const size_t poolCount = m_Pools.size();
8528  if(poolCount > 0)
8529  {
8530  json.WriteString("Pools");
8531  json.BeginArray();
8532  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
8533  {
8534  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
8535  }
8536  json.EndArray();
8537  }
8538  }
8539 }
8540 
8541 #endif // #if VMA_STATS_STRING_ENABLED
8542 
8543 static VkResult AllocateMemoryForImage(
8544  VmaAllocator allocator,
8545  VkImage image,
8546  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8547  VmaSuballocationType suballocType,
8548  VmaAllocation* pAllocation)
8549 {
8550  VMA_ASSERT(allocator && (image != VK_NULL_HANDLE) && pAllocationCreateInfo && pAllocation);
8551 
8552  VkMemoryRequirements vkMemReq = {};
8553  bool requiresDedicatedAllocation = false;
8554  bool prefersDedicatedAllocation = false;
8555  allocator->GetImageMemoryRequirements(image, vkMemReq,
8556  requiresDedicatedAllocation, prefersDedicatedAllocation);
8557 
8558  return allocator->AllocateMemory(
8559  vkMemReq,
8560  requiresDedicatedAllocation,
8561  prefersDedicatedAllocation,
8562  VK_NULL_HANDLE, // dedicatedBuffer
8563  image, // dedicatedImage
8564  *pAllocationCreateInfo,
8565  suballocType,
8566  pAllocation);
8567 }
8568 
8569 ////////////////////////////////////////////////////////////////////////////////
8570 // Public interface
8571 
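// A minimal usage sketch (illustrative only; myPhysicalDevice and myDevice are
// placeholders for handles the application created earlier):
//
//   VmaAllocatorCreateInfo allocatorInfo = {};
//   allocatorInfo.physicalDevice = myPhysicalDevice;
//   allocatorInfo.device = myDevice;
//
//   VmaAllocator allocator;
//   vmaCreateAllocator(&allocatorInfo, &allocator);
//   ...
//   vmaDestroyAllocator(allocator);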
8572 VkResult vmaCreateAllocator(
8573  const VmaAllocatorCreateInfo* pCreateInfo,
8574  VmaAllocator* pAllocator)
8575 {
8576  VMA_ASSERT(pCreateInfo && pAllocator);
8577  VMA_DEBUG_LOG("vmaCreateAllocator");
8578  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
8579  return VK_SUCCESS;
8580 }
8581 
8582 void vmaDestroyAllocator(
8583  VmaAllocator allocator)
8584 {
8585  if(allocator != VK_NULL_HANDLE)
8586  {
8587  VMA_DEBUG_LOG("vmaDestroyAllocator");
8588  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
8589  vma_delete(&allocationCallbacks, allocator);
8590  }
8591 }
8592 
8593 void vmaGetPhysicalDeviceProperties(
8594  VmaAllocator allocator,
8595  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
8596 {
8597  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
8598  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
8599 }
8600 
8601 void vmaGetMemoryProperties(
8602  VmaAllocator allocator,
8603  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
8604 {
8605  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
8606  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
8607 }
8608 
8609 void vmaGetMemoryTypeProperties(
8610  VmaAllocator allocator,
8611  uint32_t memoryTypeIndex,
8612  VkMemoryPropertyFlags* pFlags)
8613 {
8614  VMA_ASSERT(allocator && pFlags);
8615  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
8616  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
8617 }
8618 
8619 void vmaSetCurrentFrameIndex(
8620  VmaAllocator allocator,
8621  uint32_t frameIndex)
8622 {
8623  VMA_ASSERT(allocator);
8624  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
8625 
8626  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8627 
8628  allocator->SetCurrentFrameIndex(frameIndex);
8629 }
8630 
8631 void vmaCalculateStats(
8632  VmaAllocator allocator,
8633  VmaStats* pStats)
8634 {
8635  VMA_ASSERT(allocator && pStats);
8636  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8637  allocator->CalculateStats(pStats);
8638 }
8639 
8640 #if VMA_STATS_STRING_ENABLED
8641 
8642 void vmaBuildStatsString(
8643  VmaAllocator allocator,
8644  char** ppStatsString,
8645  VkBool32 detailedMap)
8646 {
8647  VMA_ASSERT(allocator && ppStatsString);
8648  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8649 
8650  VmaStringBuilder sb(allocator);
8651  {
8652  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
8653  json.BeginObject();
8654 
8655  VmaStats stats;
8656  allocator->CalculateStats(&stats);
8657 
8658  json.WriteString("Total");
8659  VmaPrintStatInfo(json, stats.total);
8660 
8661  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
8662  {
8663  json.BeginString("Heap ");
8664  json.ContinueString(heapIndex);
8665  json.EndString();
8666  json.BeginObject();
8667 
8668  json.WriteString("Size");
8669  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
8670 
8671  json.WriteString("Flags");
8672  json.BeginArray(true);
8673  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
8674  {
8675  json.WriteString("DEVICE_LOCAL");
8676  }
8677  json.EndArray();
8678 
8679  if(stats.memoryHeap[heapIndex].blockCount > 0)
8680  {
8681  json.WriteString("Stats");
8682  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
8683  }
8684 
8685  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
8686  {
8687  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
8688  {
8689  json.BeginString("Type ");
8690  json.ContinueString(typeIndex);
8691  json.EndString();
8692 
8693  json.BeginObject();
8694 
8695  json.WriteString("Flags");
8696  json.BeginArray(true);
8697  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
8698  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
8699  {
8700  json.WriteString("DEVICE_LOCAL");
8701  }
8702  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
8703  {
8704  json.WriteString("HOST_VISIBLE");
8705  }
8706  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
8707  {
8708  json.WriteString("HOST_COHERENT");
8709  }
8710  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
8711  {
8712  json.WriteString("HOST_CACHED");
8713  }
8714  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
8715  {
8716  json.WriteString("LAZILY_ALLOCATED");
8717  }
8718  json.EndArray();
8719 
8720  if(stats.memoryType[typeIndex].blockCount > 0)
8721  {
8722  json.WriteString("Stats");
8723  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
8724  }
8725 
8726  json.EndObject();
8727  }
8728  }
8729 
8730  json.EndObject();
8731  }
8732  if(detailedMap == VK_TRUE)
8733  {
8734  allocator->PrintDetailedMap(json);
8735  }
8736 
8737  json.EndObject();
8738  }
8739 
8740  const size_t len = sb.GetLength();
8741  char* const pChars = vma_new_array(allocator, char, len + 1);
8742  if(len > 0)
8743  {
8744  memcpy(pChars, sb.GetData(), len);
8745  }
8746  pChars[len] = '\0';
8747  *ppStatsString = pChars;
8748 }
8749 
8750 void vmaFreeStatsString(
8751  VmaAllocator allocator,
8752  char* pStatsString)
8753 {
8754  if(pStatsString != VMA_NULL)
8755  {
8756  VMA_ASSERT(allocator);
8757  size_t len = strlen(pStatsString);
8758  vma_delete_array(allocator, pStatsString, len + 1);
8759  }
8760 }
8761 
8762 #endif // #if VMA_STATS_STRING_ENABLED
8763 
8764 /*
8765 This function is not protected by any mutex because it just reads immutable data.
8766 */
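// The search below is a simple cost minimization: among types allowed by
// memoryTypeBits that contain all requiredFlags, cost is the number of
// preferredFlags bits missing from the type, and the first zero-cost type wins
// immediately. E.g. with preferredFlags = DEVICE_LOCAL, a type that is only
// HOST_VISIBLE costs 1, so a DEVICE_LOCAL type found later still replaces it.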
8767 VkResult vmaFindMemoryTypeIndex(
8768  VmaAllocator allocator,
8769  uint32_t memoryTypeBits,
8770  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8771  uint32_t* pMemoryTypeIndex)
8772 {
8773  VMA_ASSERT(allocator != VK_NULL_HANDLE);
8774  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
8775  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
8776 
8777  if(pAllocationCreateInfo->memoryTypeBits != 0)
8778  {
8779  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
8780  }
8781 
8782  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
8783  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
8784 
8785  // Convert usage to requiredFlags and preferredFlags.
8786  switch(pAllocationCreateInfo->usage)
8787  {
8788  case VMA_MEMORY_USAGE_UNKNOWN:
8789  break;
8790  case VMA_MEMORY_USAGE_GPU_ONLY:
8791  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
8792  break;
8793  case VMA_MEMORY_USAGE_CPU_ONLY:
8794  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
8795  break;
8796  case VMA_MEMORY_USAGE_CPU_TO_GPU:
8797  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
8798  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
8799  break;
8800  case VMA_MEMORY_USAGE_GPU_TO_CPU:
8801  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
8802  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
8803  break;
8804  default:
8805  break;
8806  }
8807 
8808  *pMemoryTypeIndex = UINT32_MAX;
8809  uint32_t minCost = UINT32_MAX;
8810  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
8811  memTypeIndex < allocator->GetMemoryTypeCount();
8812  ++memTypeIndex, memTypeBit <<= 1)
8813  {
8814  // This memory type is acceptable according to memoryTypeBits bitmask.
8815  if((memTypeBit & memoryTypeBits) != 0)
8816  {
8817  const VkMemoryPropertyFlags currFlags =
8818  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
8819  // This memory type contains requiredFlags.
8820  if((requiredFlags & ~currFlags) == 0)
8821  {
8822  // Calculate cost as number of bits from preferredFlags not present in this memory type.
8823  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
8824  // Remember memory type with lowest cost.
8825  if(currCost < minCost)
8826  {
8827  *pMemoryTypeIndex = memTypeIndex;
8828  if(currCost == 0)
8829  {
8830  return VK_SUCCESS;
8831  }
8832  minCost = currCost;
8833  }
8834  }
8835  }
8836  }
8837  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
8838 }
8839 
8840 VkResult vmaFindMemoryTypeIndexForBufferInfo(
8841  VmaAllocator allocator,
8842  const VkBufferCreateInfo* pBufferCreateInfo,
8843  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8844  uint32_t* pMemoryTypeIndex)
8845 {
8846  VMA_ASSERT(allocator != VK_NULL_HANDLE);
8847  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
8848  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
8849  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
8850 
8851  const VkDevice hDev = allocator->m_hDevice;
8852  VkBuffer hBuffer = VK_NULL_HANDLE;
8853  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
8854  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
8855  if(res == VK_SUCCESS)
8856  {
8857  VkMemoryRequirements memReq = {};
8858  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
8859  hDev, hBuffer, &memReq);
8860 
8861  res = vmaFindMemoryTypeIndex(
8862  allocator,
8863  memReq.memoryTypeBits,
8864  pAllocationCreateInfo,
8865  pMemoryTypeIndex);
8866 
8867  allocator->GetVulkanFunctions().vkDestroyBuffer(
8868  hDev, hBuffer, allocator->GetAllocationCallbacks());
8869  }
8870  return res;
8871 }
8872 
8873 VkResult vmaFindMemoryTypeIndexForImageInfo(
8874  VmaAllocator allocator,
8875  const VkImageCreateInfo* pImageCreateInfo,
8876  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8877  uint32_t* pMemoryTypeIndex)
8878 {
8879  VMA_ASSERT(allocator != VK_NULL_HANDLE);
8880  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
8881  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
8882  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
8883 
8884  const VkDevice hDev = allocator->m_hDevice;
8885  VkImage hImage = VK_NULL_HANDLE;
8886  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
8887  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
8888  if(res == VK_SUCCESS)
8889  {
8890  VkMemoryRequirements memReq = {};
8891  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
8892  hDev, hImage, &memReq);
8893 
8894  res = vmaFindMemoryTypeIndex(
8895  allocator,
8896  memReq.memoryTypeBits,
8897  pAllocationCreateInfo,
8898  pMemoryTypeIndex);
8899 
8900  allocator->GetVulkanFunctions().vkDestroyImage(
8901  hDev, hImage, allocator->GetAllocationCallbacks());
8902  }
8903  return res;
8904 }
8905 
8906 VkResult vmaCreatePool(
8907  VmaAllocator allocator,
8908  const VmaPoolCreateInfo* pCreateInfo,
8909  VmaPool* pPool)
8910 {
8911  VMA_ASSERT(allocator && pCreateInfo && pPool);
8912 
8913  VMA_DEBUG_LOG("vmaCreatePool");
8914 
8915  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8916 
8917  return allocator->CreatePool(pCreateInfo, pPool);
8918 }
8919 
8920 void vmaDestroyPool(
8921  VmaAllocator allocator,
8922  VmaPool pool)
8923 {
8924  VMA_ASSERT(allocator);
8925 
8926  if(pool == VK_NULL_HANDLE)
8927  {
8928  return;
8929  }
8930 
8931  VMA_DEBUG_LOG("vmaDestroyPool");
8932 
8933  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8934 
8935  allocator->DestroyPool(pool);
8936 }
8937 
8938 void vmaGetPoolStats(
8939  VmaAllocator allocator,
8940  VmaPool pool,
8941  VmaPoolStats* pPoolStats)
8942 {
8943  VMA_ASSERT(allocator && pool && pPoolStats);
8944 
8945  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8946 
8947  allocator->GetPoolStats(pool, pPoolStats);
8948 }
8949 
8950 void vmaMakePoolAllocationsLost(
8951  VmaAllocator allocator,
8952  VmaPool pool,
8953  size_t* pLostAllocationCount)
8954 {
8955  VMA_ASSERT(allocator && pool);
8956 
8957  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8958 
8959  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
8960 }
8961 
8962 VkResult vmaAllocateMemory(
8963  VmaAllocator allocator,
8964  const VkMemoryRequirements* pVkMemoryRequirements,
8965  const VmaAllocationCreateInfo* pCreateInfo,
8966  VmaAllocation* pAllocation,
8967  VmaAllocationInfo* pAllocationInfo)
8968 {
8969  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
8970 
8971  VMA_DEBUG_LOG("vmaAllocateMemory");
8972 
8973  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8974 
8975  VkResult result = allocator->AllocateMemory(
8976  *pVkMemoryRequirements,
8977  false, // requiresDedicatedAllocation
8978  false, // prefersDedicatedAllocation
8979  VK_NULL_HANDLE, // dedicatedBuffer
8980  VK_NULL_HANDLE, // dedicatedImage
8981  *pCreateInfo,
8982  VMA_SUBALLOCATION_TYPE_UNKNOWN,
8983  pAllocation);
8984 
8985  if(pAllocationInfo && result == VK_SUCCESS)
8986  {
8987  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8988  }
8989 
8990  return result;
8991 }
8992 
8993 VkResult vmaAllocateMemoryForBuffer(
8994  VmaAllocator allocator,
8995  VkBuffer buffer,
8996  const VmaAllocationCreateInfo* pCreateInfo,
8997  VmaAllocation* pAllocation,
8998  VmaAllocationInfo* pAllocationInfo)
8999 {
9000  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
9001 
9002  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
9003 
9004  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9005 
9006  VkMemoryRequirements vkMemReq = {};
9007  bool requiresDedicatedAllocation = false;
9008  bool prefersDedicatedAllocation = false;
9009  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
9010  requiresDedicatedAllocation,
9011  prefersDedicatedAllocation);
9012 
9013  VkResult result = allocator->AllocateMemory(
9014  vkMemReq,
9015  requiresDedicatedAllocation,
9016  prefersDedicatedAllocation,
9017  buffer, // dedicatedBuffer
9018  VK_NULL_HANDLE, // dedicatedImage
9019  *pCreateInfo,
9020  VMA_SUBALLOCATION_TYPE_BUFFER,
9021  pAllocation);
9022 
9023  if(pAllocationInfo && result == VK_SUCCESS)
9024  {
9025  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
9026  }
9027 
9028  return result;
9029 }
9030 
9031 VkResult vmaAllocateMemoryForImage(
9032  VmaAllocator allocator,
9033  VkImage image,
9034  const VmaAllocationCreateInfo* pCreateInfo,
9035  VmaAllocation* pAllocation,
9036  VmaAllocationInfo* pAllocationInfo)
9037 {
9038  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
9039 
9040  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
9041 
9042  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9043 
9044  VkResult result = AllocateMemoryForImage(
9045  allocator,
9046  image,
9047  pCreateInfo,
9048  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
9049  pAllocation);
9050 
9051  if(pAllocationInfo && result == VK_SUCCESS)
9052  {
9053  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
9054  }
9055 
9056  return result;
9057 }
9058 
9059 void vmaFreeMemory(
9060  VmaAllocator allocator,
9061  VmaAllocation allocation)
9062 {
9063  VMA_ASSERT(allocator && allocation);
9064 
9065  VMA_DEBUG_LOG("vmaFreeMemory");
9066 
9067  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9068 
9069  allocator->FreeMemory(allocation);
9070 }
9071 
9072 void vmaGetAllocationInfo(
9073  VmaAllocator allocator,
9074  VmaAllocation allocation,
9075  VmaAllocationInfo* pAllocationInfo)
9076 {
9077  VMA_ASSERT(allocator && allocation && pAllocationInfo);
9078 
9079  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9080 
9081  allocator->GetAllocationInfo(allocation, pAllocationInfo);
9082 }
9083 
9084 VkBool32 vmaTouchAllocation(
9085  VmaAllocator allocator,
9086  VmaAllocation allocation)
9087 {
9088  VMA_ASSERT(allocator && allocation);
9089 
9090  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9091 
9092  return allocator->TouchAllocation(allocation);
9093 }
9094 
9095 void vmaSetAllocationUserData(
9096  VmaAllocator allocator,
9097  VmaAllocation allocation,
9098  void* pUserData)
9099 {
9100  VMA_ASSERT(allocator && allocation);
9101 
9102  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9103 
9104  allocation->SetUserData(allocator, pUserData);
9105 }
9106 
9107 void vmaCreateLostAllocation(
9108  VmaAllocator allocator,
9109  VmaAllocation* pAllocation)
9110 {
9111  VMA_ASSERT(allocator && pAllocation);
9112 
9113  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
9114 
9115  allocator->CreateLostAllocation(pAllocation);
9116 }
9117 
9118 VkResult vmaMapMemory(
9119  VmaAllocator allocator,
9120  VmaAllocation allocation,
9121  void** ppData)
9122 {
9123  VMA_ASSERT(allocator && allocation && ppData);
9124 
9125  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9126 
9127  return allocator->Map(allocation, ppData);
9128 }
9129 
9130 void vmaUnmapMemory(
9131  VmaAllocator allocator,
9132  VmaAllocation allocation)
9133 {
9134  VMA_ASSERT(allocator && allocation);
9135 
9136  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9137 
9138  allocator->Unmap(allocation);
9139 }
9140 
9141 VkResult vmaDefragment(
9142  VmaAllocator allocator,
9143  VmaAllocation* pAllocations,
9144  size_t allocationCount,
9145  VkBool32* pAllocationsChanged,
9146  const VmaDefragmentationInfo *pDefragmentationInfo,
9147  VmaDefragmentationStats* pDefragmentationStats)
9148 {
9149  VMA_ASSERT(allocator && pAllocations);
9150 
9151  VMA_DEBUG_LOG("vmaDefragment");
9152 
9153  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9154 
9155  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
9156 }
9157 
9158 VkResult vmaBindBufferMemory(
9159  VmaAllocator allocator,
9160  VmaAllocation allocation,
9161  VkBuffer buffer)
9162 {
9163  VMA_ASSERT(allocator && allocation && buffer);
9164 
9165  VMA_DEBUG_LOG("vmaBindBufferMemory");
9166 
9167  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9168 
9169  return allocator->BindBufferMemory(allocation, buffer);
9170 }
9171 
9172 VkResult vmaBindImageMemory(
9173  VmaAllocator allocator,
9174  VmaAllocation allocation,
9175  VkImage image)
9176 {
9177  VMA_ASSERT(allocator && allocation && image);
9178 
9179  VMA_DEBUG_LOG("vmaBindImageMemory");
9180 
9181  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9182 
9183  return allocator->BindImageMemory(allocation, image);
9184 }
9185 
9186 VkResult vmaCreateBuffer(
9187  VmaAllocator allocator,
9188  const VkBufferCreateInfo* pBufferCreateInfo,
9189  const VmaAllocationCreateInfo* pAllocationCreateInfo,
9190  VkBuffer* pBuffer,
9191  VmaAllocation* pAllocation,
9192  VmaAllocationInfo* pAllocationInfo)
9193 {
9194  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
9195 
9196  VMA_DEBUG_LOG("vmaCreateBuffer");
9197 
9198  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9199 
9200  *pBuffer = VK_NULL_HANDLE;
9201  *pAllocation = VK_NULL_HANDLE;
9202 
9203  // 1. Create VkBuffer.
9204  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
9205  allocator->m_hDevice,
9206  pBufferCreateInfo,
9207  allocator->GetAllocationCallbacks(),
9208  pBuffer);
9209  if(res >= 0)
9210  {
9211  // 2. vkGetBufferMemoryRequirements.
9212  VkMemoryRequirements vkMemReq = {};
9213  bool requiresDedicatedAllocation = false;
9214  bool prefersDedicatedAllocation = false;
9215  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
9216  requiresDedicatedAllocation, prefersDedicatedAllocation);
9217 
9218  // Make sure alignment requirements for specific buffer usages reported
9219  // in Physical Device Properties are included in alignment reported by memory requirements.
9220  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
9221  {
9222  VMA_ASSERT(vkMemReq.alignment %
9223  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
9224  }
9225  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
9226  {
9227  VMA_ASSERT(vkMemReq.alignment %
9228  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
9229  }
9230  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
9231  {
9232  VMA_ASSERT(vkMemReq.alignment %
9233  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
9234  }
9235 
9236  // 3. Allocate memory using allocator.
9237  res = allocator->AllocateMemory(
9238  vkMemReq,
9239  requiresDedicatedAllocation,
9240  prefersDedicatedAllocation,
9241  *pBuffer, // dedicatedBuffer
9242  VK_NULL_HANDLE, // dedicatedImage
9243  *pAllocationCreateInfo,
9244  VMA_SUBALLOCATION_TYPE_BUFFER,
9245  pAllocation);
9246  if(res >= 0)
9247  {
9248  // 4. Bind buffer with memory.
9249  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
9250  if(res >= 0)
9251  {
9252  // All steps succeeded.
9253  if(pAllocationInfo != VMA_NULL)
9254  {
9255  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
9256  }
9257  return VK_SUCCESS;
9258  }
9259  allocator->FreeMemory(*pAllocation);
9260  *pAllocation = VK_NULL_HANDLE;
9261  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
9262  *pBuffer = VK_NULL_HANDLE;
9263  return res;
9264  }
9265  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
9266  *pBuffer = VK_NULL_HANDLE;
9267  return res;
9268  }
9269  return res;
9270 }
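The function above wraps the whole sequence: create the buffer, query its memory requirements (asserting alignment against device limits), allocate, bind, and unwind cleanly on any failure. A minimal usage sketch, assuming a valid `allocator`:

VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 65536;
bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VkBuffer buf = VK_NULL_HANDLE;
VmaAllocation alloc = VK_NULL_HANDLE;
VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);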
9271 
9272 void vmaDestroyBuffer(
9273  VmaAllocator allocator,
9274  VkBuffer buffer,
9275  VmaAllocation allocation)
9276 {
9277  if(buffer != VK_NULL_HANDLE)
9278  {
9279  VMA_ASSERT(allocator);
9280 
9281  VMA_DEBUG_LOG("vmaDestroyBuffer");
9282 
9283  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9284 
9285  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
9286 
9287  allocator->FreeMemory(allocation);
9288  }
9289 }
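The destroy path mirrors vmaCreateBuffer(): a single call releases both the VkBuffer and its VmaAllocation, and passing VK_NULL_HANDLE as the buffer makes the call a no-op. Continuing the sketch above:

vmaDestroyBuffer(allocator, buf, alloc); // destroys the buffer and frees its memory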
9290 
9291 VkResult vmaCreateImage(
9292  VmaAllocator allocator,
9293  const VkImageCreateInfo* pImageCreateInfo,
9294  const VmaAllocationCreateInfo* pAllocationCreateInfo,
9295  VkImage* pImage,
9296  VmaAllocation* pAllocation,
9297  VmaAllocationInfo* pAllocationInfo)
9298 {
9299  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
9300 
9301  VMA_DEBUG_LOG("vmaCreateImage");
9302 
9303  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9304 
9305  *pImage = VK_NULL_HANDLE;
9306  *pAllocation = VK_NULL_HANDLE;
9307 
9308  // 1. Create VkImage.
9309  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
9310  allocator->m_hDevice,
9311  pImageCreateInfo,
9312  allocator->GetAllocationCallbacks(),
9313  pImage);
9314  if(res >= 0)
9315  {
9316  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
9317  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
9318  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
9319 
9320  // 2. Allocate memory using allocator.
9321  res = AllocateMemoryForImage(allocator, *pImage, pAllocationCreateInfo, suballocType, pAllocation);
9322  if(res >= 0)
9323  {
9324  // 3. Bind image with memory.
9325  res = allocator->BindImageMemory(*pAllocation, *pImage);
9326  if(res >= 0)
9327  {
9328  // All steps succeeded.
9329  if(pAllocationInfo != VMA_NULL)
9330  {
9331  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
9332  }
9333  return VK_SUCCESS;
9334  }
9335  allocator->FreeMemory(*pAllocation);
9336  *pAllocation = VK_NULL_HANDLE;
9337  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
9338  *pImage = VK_NULL_HANDLE;
9339  return res;
9340  }
9341  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
9342  *pImage = VK_NULL_HANDLE;
9343  return res;
9344  }
9345  return res;
9346 }
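A minimal usage sketch matching the flow above, assuming a valid `allocator`; note that VK_IMAGE_TILING_OPTIMAL selects the IMAGE_OPTIMAL suballocation type:

VkImageCreateInfo imageCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
imageCreateInfo.imageType = VK_IMAGE_TYPE_2D;
imageCreateInfo.extent = { 1024, 1024, 1 };
imageCreateInfo.mipLevels = 1;
imageCreateInfo.arrayLayers = 1;
imageCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
imageCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
imageCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
imageCreateInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
imageCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VkImage image = VK_NULL_HANDLE;
VmaAllocation alloc = VK_NULL_HANDLE;
VkResult res = vmaCreateImage(allocator, &imageCreateInfo, &allocCreateInfo, &image, &alloc, nullptr);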
9347 
9348 void vmaDestroyImage(
9349  VmaAllocator allocator,
9350  VkImage image,
9351  VmaAllocation allocation)
9352 {
9353  if(image != VK_NULL_HANDLE)
9354  {
9355  VMA_ASSERT(allocator);
9356 
9357  VMA_DEBUG_LOG("vmaDestroyImage");
9358 
9359  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9360 
9361  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
9362 
9363  allocator->FreeMemory(allocation);
9364  }
9365 }
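And the symmetric teardown; passing VK_NULL_HANDLE as the image is a no-op:

vmaDestroyImage(allocator, image, alloc); // destroys the image and frees its memory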
9366 
9367 #endif // #ifdef VMA_IMPLEMENTATION
PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
Definition: vk_mem_alloc.h:1157
+
Set this flag if the allocation should have its own memory block.
Definition: vk_mem_alloc.h:1419
void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
-
VkPhysicalDevice physicalDevice
Vulkan physical device.
Definition: vk_mem_alloc.h:1178
+
VkPhysicalDevice physicalDevice
Vulkan physical device.
Definition: vk_mem_alloc.h:1182
VkResult vmaDefragment(VmaAllocator allocator, VmaAllocation *pAllocations, size_t allocationCount, VkBool32 *pAllocationsChanged, const VmaDefragmentationInfo *pDefragmentationInfo, VmaDefragmentationStats *pDefragmentationStats)
Compacts memory by moving allocations.
Represents single memory allocation.
-
PFN_vkCreateBuffer vkCreateBuffer
Definition: vk_mem_alloc.h:1163
+
PFN_vkCreateBuffer vkCreateBuffer
Definition: vk_mem_alloc.h:1167
void vmaFreeStatsString(VmaAllocator allocator, char *pStatsString)
struct VmaStats VmaStats
General statistics from current state of Allocator.
-
Definition: vk_mem_alloc.h:1372
-
PFN_vkMapMemory vkMapMemory
Definition: vk_mem_alloc.h:1157
-
VkDeviceMemory deviceMemory
Handle to Vulkan memory object.
Definition: vk_mem_alloc.h:1745
-
VmaAllocatorCreateFlags flags
Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
Definition: vk_mem_alloc.h:1175
-
uint32_t maxAllocationsToMove
Maximum number of allocations that can be moved to a different place.
Definition: vk_mem_alloc.h:1944
-
Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
Definition: vk_mem_alloc.h:1591
+
Definition: vk_mem_alloc.h:1376
+
PFN_vkMapMemory vkMapMemory
Definition: vk_mem_alloc.h:1161
+
VkDeviceMemory deviceMemory
Handle to Vulkan memory object.
Definition: vk_mem_alloc.h:1749
+
VmaAllocatorCreateFlags flags
Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
Definition: vk_mem_alloc.h:1179
+
uint32_t maxAllocationsToMove
Maximum number of allocations that can be moved to a different place.
Definition: vk_mem_alloc.h:1948
+
Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
Definition: vk_mem_alloc.h:1595
void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInf...
-
VkDeviceSize size
Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
Definition: vk_mem_alloc.h:1645
-
Definition: vk_mem_alloc.h:1452
-
VkFlags VmaAllocatorCreateFlags
Definition: vk_mem_alloc.h:1146
-
VkMemoryPropertyFlags preferredFlags
Flags that preferably should be set in a memory type chosen for an allocation.
Definition: vk_mem_alloc.h:1490
-
Definition: vk_mem_alloc.h:1399
-
const VkAllocationCallbacks * pAllocationCallbacks
Custom CPU memory allocation callbacks. Optional.
Definition: vk_mem_alloc.h:1187
+
VkDeviceSize size
Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
Definition: vk_mem_alloc.h:1649
+
Definition: vk_mem_alloc.h:1456
+
VkFlags VmaAllocatorCreateFlags
Definition: vk_mem_alloc.h:1150
+
VkMemoryPropertyFlags preferredFlags
Flags that preferably should be set in a memory type chosen for an allocation.
Definition: vk_mem_alloc.h:1494
+
Definition: vk_mem_alloc.h:1403
+
const VkAllocationCallbacks * pAllocationCallbacks
Custom CPU memory allocation callbacks. Optional.
Definition: vk_mem_alloc.h:1191
void vmaCalculateStats(VmaAllocator allocator, VmaStats *pStats)
Retrieves statistics from current state of the Allocator.
-
const VmaVulkanFunctions * pVulkanFunctions
Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1...
Definition: vk_mem_alloc.h:1240
-
Description of an Allocator to be created.
Definition: vk_mem_alloc.h:1172
+
const VmaVulkanFunctions * pVulkanFunctions
Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1...
Definition: vk_mem_alloc.h:1244
+
Description of an Allocator to be created.
Definition: vk_mem_alloc.h:1176
void vmaDestroyAllocator(VmaAllocator allocator)
Destroys allocator object.
-
VmaAllocationCreateFlagBits
Flags to be passed as VmaAllocationCreateInfo::flags.
Definition: vk_mem_alloc.h:1403
+
VmaAllocationCreateFlagBits
Flags to be passed as VmaAllocationCreateInfo::flags.
Definition: vk_mem_alloc.h:1407
void vmaGetAllocationInfo(VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)
Returns current information about specified allocation and atomically marks it as used in current fra...
-
VkDeviceSize allocationSizeMax
Definition: vk_mem_alloc.h:1305
-
PFN_vkBindImageMemory vkBindImageMemory
Definition: vk_mem_alloc.h:1160
-
VkDeviceSize unusedBytes
Total number of bytes occupied by unused ranges.
Definition: vk_mem_alloc.h:1304
-
PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR
Definition: vk_mem_alloc.h:1168
-
Statistics returned by function vmaDefragment().
Definition: vk_mem_alloc.h:1948
+
VkDeviceSize allocationSizeMax
Definition: vk_mem_alloc.h:1309
+
PFN_vkBindImageMemory vkBindImageMemory
Definition: vk_mem_alloc.h:1164
+
VkDeviceSize unusedBytes
Total number of bytes occupied by unused ranges.
Definition: vk_mem_alloc.h:1308
+
PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR
Definition: vk_mem_alloc.h:1172
+
Statistics returned by function vmaDefragment().
Definition: vk_mem_alloc.h:1952
void vmaFreeMemory(VmaAllocator allocator, VmaAllocation allocation)
Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
-
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:1204
-
VmaStatInfo total
Definition: vk_mem_alloc.h:1314
-
uint32_t deviceMemoryBlocksFreed
Number of empty VkDeviceMemory objects that have been released to the system.
Definition: vk_mem_alloc.h:1956
-
VmaAllocationCreateFlags flags
Use VmaAllocationCreateFlagBits enum.
Definition: vk_mem_alloc.h:1474
-
VkDeviceSize maxBytesToMove
Maximum total number of bytes that can be copied while moving allocations to different places...
Definition: vk_mem_alloc.h:1939
-
PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
Definition: vk_mem_alloc.h:1161
-
void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called after successful vkAllocateMemory.
Definition: vk_mem_alloc.h:1088
+
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:1208
+
VmaStatInfo total
Definition: vk_mem_alloc.h:1318
+
uint32_t deviceMemoryBlocksFreed
Number of empty VkDeviceMemory objects that have been released to the system.
Definition: vk_mem_alloc.h:1960
+
VmaAllocationCreateFlags flags
Use VmaAllocationCreateFlagBits enum.
Definition: vk_mem_alloc.h:1478
+
VkDeviceSize maxBytesToMove
Maximum total number of bytes that can be copied while moving allocations to different places...
Definition: vk_mem_alloc.h:1943
+
PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
Definition: vk_mem_alloc.h:1165
+
void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called after successful vkAllocateMemory.
Definition: vk_mem_alloc.h:1092
Represents the main object of this library, initialized with vmaCreateAllocator().
-
VkDevice device
Vulkan device.
Definition: vk_mem_alloc.h:1181
+
VkDevice device
Vulkan device.
Definition: vk_mem_alloc.h:1185
VkResult vmaBindBufferMemory(VmaAllocator allocator, VmaAllocation allocation, VkBuffer buffer)
Binds buffer to allocation.
-
Describes parameters of a created VmaPool.
Definition: vk_mem_alloc.h:1599
-
Definition: vk_mem_alloc.h:1593
-
VkDeviceSize size
Size of this allocation, in bytes.
Definition: vk_mem_alloc.h:1755
+
Describes parameters of a created VmaPool.
Definition: vk_mem_alloc.h:1603
+
Definition: vk_mem_alloc.h:1597
+
VkDeviceSize size
Size of this allocation, in bytes.
Definition: vk_mem_alloc.h:1759
void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
Given Memory Type Index, returns Property Flags of this memory type.
-
PFN_vkUnmapMemory vkUnmapMemory
Definition: vk_mem_alloc.h:1158
-
void * pUserData
Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
Definition: vk_mem_alloc.h:1511
-
size_t minBlockCount
Minimum number of blocks to be always allocated in this pool, even if they stay empty.
Definition: vk_mem_alloc.h:1615
-
size_t allocationCount
Number of VmaAllocation objects created from this pool that were not destroyed or lost...
Definition: vk_mem_alloc.h:1651
+
PFN_vkUnmapMemory vkUnmapMemory
Definition: vk_mem_alloc.h:1162
+
void * pUserData
Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
Definition: vk_mem_alloc.h:1515
+
size_t minBlockCount
Minimum number of blocks to be always allocated in this pool, even if they stay empty.
Definition: vk_mem_alloc.h:1619
+
size_t allocationCount
Number of VmaAllocation objects created from this pool that were not destroyed or lost...
Definition: vk_mem_alloc.h:1655
struct VmaVulkanFunctions VmaVulkanFunctions
Pointers to some Vulkan functions - a subset used by the library.
-
Definition: vk_mem_alloc.h:1144
-
uint32_t memoryTypeIndex
Vulkan memory type index to allocate this pool from.
Definition: vk_mem_alloc.h:1602
+
Definition: vk_mem_alloc.h:1148
+
uint32_t memoryTypeIndex
Vulkan memory type index to allocate this pool from.
Definition: vk_mem_alloc.h:1606
VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
-
VmaMemoryUsage
Definition: vk_mem_alloc.h:1350
+
VmaMemoryUsage
Definition: vk_mem_alloc.h:1354
struct VmaAllocationInfo VmaAllocationInfo
Parameters of VmaAllocation objects that can be retrieved using function vmaGetAllocationInfo().
-
Optional configuration parameters to be passed to function vmaDefragment().
Definition: vk_mem_alloc.h:1934
+
Optional configuration parameters to be passed to function vmaDefragment().
Definition: vk_mem_alloc.h:1938
struct VmaPoolCreateInfo VmaPoolCreateInfo
Describes parameters of a created VmaPool.
void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
Destroys VmaPool object and frees Vulkan device memory.
-
VkDeviceSize bytesFreed
Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
Definition: vk_mem_alloc.h:1952
-
Definition: vk_mem_alloc.h:1389
-
uint32_t memoryTypeBits
Bitmask containing one bit set for every memory type acceptable for this allocation.
Definition: vk_mem_alloc.h:1498
-
PFN_vkBindBufferMemory vkBindBufferMemory
Definition: vk_mem_alloc.h:1159
+
VkDeviceSize bytesFreed
Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
Definition: vk_mem_alloc.h:1956
+
Definition: vk_mem_alloc.h:1393
+
uint32_t memoryTypeBits
Bitmask containing one bit set for every memory type acceptable for this allocation.
Definition: vk_mem_alloc.h:1502
+
PFN_vkBindBufferMemory vkBindBufferMemory
Definition: vk_mem_alloc.h:1163
Represents custom memory pool.
void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
Retrieves statistics of existing VmaPool object.
struct VmaDefragmentationInfo VmaDefragmentationInfo
Optional configuration parameters to be passed to function vmaDefragment().
-
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:1310
-
void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called before vkFreeMemory.
Definition: vk_mem_alloc.h:1094
+
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:1314
+
void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called before vkFreeMemory.
Definition: vk_mem_alloc.h:1098
void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
Sets pUserData in given allocation to new value.
VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
Allocates Vulkan device memory and creates VmaPool object.
-
VmaAllocatorCreateFlagBits
Flags for created VmaAllocator.
Definition: vk_mem_alloc.h:1115
+
VmaAllocatorCreateFlagBits
Flags for created VmaAllocator.
Definition: vk_mem_alloc.h:1119
VkResult vmaBindImageMemory(VmaAllocator allocator, VmaAllocation allocation, VkImage image)
Binds image to allocation.
struct VmaStatInfo VmaStatInfo
Calculated statistics of memory usage in entire allocator.
-
Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
Definition: vk_mem_alloc.h:1120
-
uint32_t allocationsMoved
Number of allocations that have been moved to different places.
Definition: vk_mem_alloc.h:1954
+
Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
Definition: vk_mem_alloc.h:1124
+
uint32_t allocationsMoved
Number of allocations that have been moved to different places.
Definition: vk_mem_alloc.h:1958
void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
Creates new allocation that is in lost state from the beginning.
-
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:1485
-
VkDeviceSize unusedRangeSizeMax
Size of the largest contiguous free memory region.
Definition: vk_mem_alloc.h:1661
+
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:1489
+
VkDeviceSize unusedRangeSizeMax
Size of the largest contiguous free memory region.
Definition: vk_mem_alloc.h:1665
void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
Builds and returns statistics as string in JSON format.
-
PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
Definition: vk_mem_alloc.h:1154
-
Calculated statistics of memory usage in entire allocator.
Definition: vk_mem_alloc.h:1293
-
VkDeviceSize blockSize
Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes.
Definition: vk_mem_alloc.h:1610
-
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Definition: vk_mem_alloc.h:1107
+
PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
Definition: vk_mem_alloc.h:1158
+
Calculated statistics of memory usage in entire allocator.
Definition: vk_mem_alloc.h:1297
+
VkDeviceSize blockSize
Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes.
Definition: vk_mem_alloc.h:1614
+
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Definition: vk_mem_alloc.h:1111
VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
-
Definition: vk_mem_alloc.h:1459
-
VkDeviceSize unusedRangeSizeMin
Definition: vk_mem_alloc.h:1306
-
PFN_vmaFreeDeviceMemoryFunction pfnFree
Optional, can be null.
Definition: vk_mem_alloc.h:1111
-
VmaPoolCreateFlags flags
Use combination of VmaPoolCreateFlagBits.
Definition: vk_mem_alloc.h:1605
-
Definition: vk_mem_alloc.h:1398
+
Definition: vk_mem_alloc.h:1463
+
VkDeviceSize unusedRangeSizeMin
Definition: vk_mem_alloc.h:1310
+
PFN_vmaFreeDeviceMemoryFunction pfnFree
Optional, can be null.
Definition: vk_mem_alloc.h:1115
+
VmaPoolCreateFlags flags
Use combination of VmaPoolCreateFlagBits.
Definition: vk_mem_alloc.h:1609
+
Definition: vk_mem_alloc.h:1402
struct VmaPoolStats VmaPoolStats
Describes parameters of an existing VmaPool.
VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaCreateBuffer().
-
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:1480
-
Definition: vk_mem_alloc.h:1471
+
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:1484
+
Definition: vk_mem_alloc.h:1475
VkResult vmaFindMemoryTypeIndexForImageInfo(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
-
uint32_t blockCount
Number of VkDeviceMemory Vulkan memory blocks allocated.
Definition: vk_mem_alloc.h:1296
-
PFN_vkFreeMemory vkFreeMemory
Definition: vk_mem_alloc.h:1156
-
size_t maxBlockCount
Maximum number of blocks that can be allocated in this pool. Optional.
Definition: vk_mem_alloc.h:1623
-
const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
Definition: vk_mem_alloc.h:1190
-
size_t unusedRangeCount
Number of contiguous memory ranges in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:1654
-
VkFlags VmaAllocationCreateFlags
Definition: vk_mem_alloc.h:1469
-
VmaPool pool
Pool that this allocation should be created in.
Definition: vk_mem_alloc.h:1504
+
uint32_t blockCount
Number of VkDeviceMemory Vulkan memory blocks allocated.
Definition: vk_mem_alloc.h:1300
+
PFN_vkFreeMemory vkFreeMemory
Definition: vk_mem_alloc.h:1160
+
size_t maxBlockCount
Maximum number of blocks that can be allocated in this pool. Optional.
Definition: vk_mem_alloc.h:1627
+
const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
Definition: vk_mem_alloc.h:1194
+
size_t unusedRangeCount
Number of contiguous memory ranges in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:1658
+
VkFlags VmaAllocationCreateFlags
Definition: vk_mem_alloc.h:1473
+
VmaPool pool
Pool that this allocation should be created in.
Definition: vk_mem_alloc.h:1508
void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
-
const VkDeviceSize * pHeapSizeLimit
Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
Definition: vk_mem_alloc.h:1228
-
VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
Definition: vk_mem_alloc.h:1312
-
Set this flag to use a memory that will be persistently mapped and retrieve pointer to it...
Definition: vk_mem_alloc.h:1439
-
VkDeviceSize allocationSizeMin
Definition: vk_mem_alloc.h:1305
+
const VkDeviceSize * pHeapSizeLimit
Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
Definition: vk_mem_alloc.h:1232
+
VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
Definition: vk_mem_alloc.h:1316
+
Set this flag to use a memory that will be persistently mapped and retrieve pointer to it...
Definition: vk_mem_alloc.h:1443
+
VkDeviceSize allocationSizeMin
Definition: vk_mem_alloc.h:1309
VkResult vmaFindMemoryTypeIndexForBufferInfo(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
-
PFN_vkCreateImage vkCreateImage
Definition: vk_mem_alloc.h:1165
-
PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
Optional, can be null.
Definition: vk_mem_alloc.h:1109
-
PFN_vkDestroyBuffer vkDestroyBuffer
Definition: vk_mem_alloc.h:1164
+
PFN_vkCreateImage vkCreateImage
Definition: vk_mem_alloc.h:1169
+
PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
Optional, can be null.
Definition: vk_mem_alloc.h:1113
+
PFN_vkDestroyBuffer vkDestroyBuffer
Definition: vk_mem_alloc.h:1168
VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
Maps memory represented by given allocation and returns pointer to it.
-
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:1637
+
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:1641
VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaAllocateMemoryForBuffer().
struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
Description of an Allocator to be created.
-
void * pUserData
Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
Definition: vk_mem_alloc.h:1769
-
VkDeviceSize preferredLargeHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB...
Definition: vk_mem_alloc.h:1184
-
VkDeviceSize allocationSizeAvg
Definition: vk_mem_alloc.h:1305
-
VkDeviceSize usedBytes
Total number of bytes occupied by all allocations.
Definition: vk_mem_alloc.h:1302
+
void * pUserData
Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
Definition: vk_mem_alloc.h:1773
+
VkDeviceSize preferredLargeHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB...
Definition: vk_mem_alloc.h:1188
+
VkDeviceSize allocationSizeAvg
Definition: vk_mem_alloc.h:1309
+
VkDeviceSize usedBytes
Total number of bytes occupied by all allocations.
Definition: vk_mem_alloc.h:1306
struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
-
Describes parameters of an existing VmaPool.
Definition: vk_mem_alloc.h:1642
-
VkDeviceSize offset
Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
Definition: vk_mem_alloc.h:1750
-
Definition: vk_mem_alloc.h:1467
-
VkDeviceSize bytesMoved
Total number of bytes that have been copied while moving allocations to different places...
Definition: vk_mem_alloc.h:1950
-
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:1152
+
Describes parameters of an existing VmaPool.
Definition: vk_mem_alloc.h:1646
+
VkDeviceSize offset
Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
Definition: vk_mem_alloc.h:1754
+
Definition: vk_mem_alloc.h:1471
+
VkDeviceSize bytesMoved
Total number of bytes that have been copied while moving allocations to different places...
Definition: vk_mem_alloc.h:1954
+
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:1156
VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
Creates Allocator object.
-
PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR
Definition: vk_mem_alloc.h:1167
-
uint32_t unusedRangeCount
Number of free ranges of memory between allocations.
Definition: vk_mem_alloc.h:1300
-
Definition: vk_mem_alloc.h:1355
-
VkFlags VmaPoolCreateFlags
Definition: vk_mem_alloc.h:1595
+
PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR
Definition: vk_mem_alloc.h:1171
+
uint32_t unusedRangeCount
Number of free ranges of memory between allocations.
Definition: vk_mem_alloc.h:1304
+
Definition: vk_mem_alloc.h:1359
+
VkFlags VmaPoolCreateFlags
Definition: vk_mem_alloc.h:1599
void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
-
uint32_t allocationCount
Number of VmaAllocation allocation objects allocated.
Definition: vk_mem_alloc.h:1298
-
PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
Definition: vk_mem_alloc.h:1162
-
PFN_vkDestroyImage vkDestroyImage
Definition: vk_mem_alloc.h:1166
-
Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
Definition: vk_mem_alloc.h:1426
-
Definition: vk_mem_alloc.h:1382
-
void * pMappedData
Pointer to the beginning of this allocation as mapped data.
Definition: vk_mem_alloc.h:1764
+
uint32_t allocationCount
Number of VmaAllocation allocation objects allocated.
Definition: vk_mem_alloc.h:1302
+
PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
Definition: vk_mem_alloc.h:1166
+
PFN_vkDestroyImage vkDestroyImage
Definition: vk_mem_alloc.h:1170
+
Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
Definition: vk_mem_alloc.h:1430
+
Definition: vk_mem_alloc.h:1386
+
void * pMappedData
Pointer to the beginning of this allocation as mapped data.
Definition: vk_mem_alloc.h:1768
void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
Destroys Vulkan image and frees allocated memory.
-
Enables usage of VK_KHR_dedicated_allocation extension.
Definition: vk_mem_alloc.h:1142
+
Enables usage of VK_KHR_dedicated_allocation extension.
Definition: vk_mem_alloc.h:1146
struct VmaDefragmentationStats VmaDefragmentationStats
Statistics returned by function vmaDefragment().
-
PFN_vkAllocateMemory vkAllocateMemory
Definition: vk_mem_alloc.h:1155
-
Parameters of VmaAllocation objects that can be retrieved using function vmaGetAllocationInfo().
Definition: vk_mem_alloc.h:1731
+
PFN_vkAllocateMemory vkAllocateMemory
Definition: vk_mem_alloc.h:1159
+
Parameters of VmaAllocation objects that can be retrieved using function vmaGetAllocationInfo().
Definition: vk_mem_alloc.h:1735
VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation.
void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
Sets index of the current frame.
struct VmaAllocationCreateInfo VmaAllocationCreateInfo
VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
-
VmaPoolCreateFlagBits
Flags to be passed as VmaPoolCreateInfo::flags.
Definition: vk_mem_alloc.h:1573
-
VkDeviceSize unusedRangeSizeAvg
Definition: vk_mem_alloc.h:1306
+
VmaPoolCreateFlagBits
Flags to be passed as VmaPoolCreateInfo::flags.
Definition: vk_mem_alloc.h:1577
+
VkDeviceSize unusedRangeSizeAvg
Definition: vk_mem_alloc.h:1310
VkBool32 vmaTouchAllocation(VmaAllocator allocator, VmaAllocation allocation)
Returns VK_TRUE if allocation is not lost and atomically marks it as used in current frame...
-
-
VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
Definition: vk_mem_alloc.h:1313
+
+
VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
Definition: vk_mem_alloc.h:1317
void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
Destroys Vulkan buffer and frees allocated memory.
-
VkDeviceSize unusedSize
Total number of bytes in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:1648
-
VkDeviceSize unusedRangeSizeMax
Definition: vk_mem_alloc.h:1306
-
uint32_t memoryType
Memory type index that this allocation was allocated from.
Definition: vk_mem_alloc.h:1736
+
VkDeviceSize unusedSize
Total number of bytes in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:1652
+
VkDeviceSize unusedRangeSizeMax
Definition: vk_mem_alloc.h:1310
+
uint32_t memoryType
Memory type index that this allocation was allocated from.
Definition: vk_mem_alloc.h:1740