From 3057956f31324dc4723149f0a075ba2395cb61b4 Mon Sep 17 00:00:00 2001 From: Adam Sawicki Date: Tue, 17 Oct 2017 12:14:41 +0200 Subject: [PATCH] Minor improvements in documentation. --- docs/html/vk__mem__alloc_8h.html | 9 ++++++--- docs/html/vk__mem__alloc_8h_source.html | 2 +- src/vk_mem_alloc.h | 14 +++++++++++++- 3 files changed, 20 insertions(+), 5 deletions(-) diff --git a/docs/html/vk__mem__alloc_8h.html b/docs/html/vk__mem__alloc_8h.html index 9d2bbde..b160f3b 100644 --- a/docs/html/vk__mem__alloc_8h.html +++ b/docs/html/vk__mem__alloc_8h.html @@ -1083,7 +1083,8 @@ Functions
  • Binds the buffer with the memory.
  • If any of these operations fail, buffer and allocation are not created, returned value is negative error code, *pBuffer and *pAllocation are null.

    -

    If the function succeeded, you must destroy both buffer and allocation when you no longer need them using either convenience function vmaDestroyBuffer() or separately, using vkDestroyBuffer() and vmaFreeMemory().

    +

    If the function succeeded, you must destroy both buffer and allocation when you no longer need them using either convenience function vmaDestroyBuffer() or separately, using vkDestroyBuffer() and vmaFreeMemory().

    +

    If VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT flag was used, VK_KHR_dedicated_allocation extension is used internally to query driver whether it requires or prefers the new buffer to have dedicated allocation. If yes, and if dedicated allocation is possible (VmaAllocationCreateInfo::pool is null and VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT is not used), it creates dedicated allocation for this buffer, just like when using VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.

    @@ -1348,7 +1349,8 @@ Functions

    Destroys Vulkan buffer and frees allocated memory.

    This is just a convenience function equivalent to:

    -
    vkDestroyBuffer(device, buffer, allocationCallbacks);
    vmaFreeMemory(allocator, allocation);
    +
    vkDestroyBuffer(device, buffer, allocationCallbacks);
    vmaFreeMemory(allocator, allocation);

    It is safe to pass null as buffer and/or allocation.

    + @@ -1385,7 +1387,8 @@ Functions

    Destroys Vulkan image and frees allocated memory.

    This is just a convenience function equivalent to:

    -
    vkDestroyImage(device, image, allocationCallbacks);
    vmaFreeMemory(allocator, allocation);
    +
    vkDestroyImage(device, image, allocationCallbacks);
    vmaFreeMemory(allocator, allocation);

    It is safe to pass null as image and/or allocation.

    + diff --git a/docs/html/vk__mem__alloc_8h_source.html b/docs/html/vk__mem__alloc_8h_source.html index 211939f..2ca8750 100644 --- a/docs/html/vk__mem__alloc_8h_source.html +++ b/docs/html/vk__mem__alloc_8h_source.html @@ -62,7 +62,7 @@ $(function() {
    vk_mem_alloc.h
    -Go to the documentation of this file.
    1 //
    2 // Copyright (c) 2017 Advanced Micro Devices, Inc. All rights reserved.
    3 //
    4 // Permission is hereby granted, free of charge, to any person obtaining a copy
    5 // of this software and associated documentation files (the "Software"), to deal
    6 // in the Software without restriction, including without limitation the rights
    7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    8 // copies of the Software, and to permit persons to whom the Software is
    9 // furnished to do so, subject to the following conditions:
    10 //
    11 // The above copyright notice and this permission notice shall be included in
    12 // all copies or substantial portions of the Software.
    13 //
    14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    20 // THE SOFTWARE.
    21 //
    22 
    23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
    24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
    25 
    26 #ifdef __cplusplus
    27 extern "C" {
    28 #endif
    29 
    492 #include <vulkan/vulkan.h>
    493 
    494 VK_DEFINE_HANDLE(VmaAllocator)
    495 
    496 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    498  VmaAllocator allocator,
    499  uint32_t memoryType,
    500  VkDeviceMemory memory,
    501  VkDeviceSize size);
    503 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    504  VmaAllocator allocator,
    505  uint32_t memoryType,
    506  VkDeviceMemory memory,
    507  VkDeviceSize size);
    508 
    516 typedef struct VmaDeviceMemoryCallbacks {
    522 
    558 
    561 typedef VkFlags VmaAllocatorCreateFlags;
    562 
    567 typedef struct VmaVulkanFunctions {
    568  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    569  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    570  PFN_vkAllocateMemory vkAllocateMemory;
    571  PFN_vkFreeMemory vkFreeMemory;
    572  PFN_vkMapMemory vkMapMemory;
    573  PFN_vkUnmapMemory vkUnmapMemory;
    574  PFN_vkBindBufferMemory vkBindBufferMemory;
    575  PFN_vkBindImageMemory vkBindImageMemory;
    576  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    577  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    578  PFN_vkCreateBuffer vkCreateBuffer;
    579  PFN_vkDestroyBuffer vkDestroyBuffer;
    580  PFN_vkCreateImage vkCreateImage;
    581  PFN_vkDestroyImage vkDestroyImage;
    582  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    583  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
    585 
    588 {
    590  VmaAllocatorCreateFlags flags;
    592 
    593  VkPhysicalDevice physicalDevice;
    595 
    596  VkDevice device;
    598 
    601 
    604 
    605  const VkAllocationCallbacks* pAllocationCallbacks;
    607 
    622  uint32_t frameInUseCount;
    640  const VkDeviceSize* pHeapSizeLimit;
    654 
    656 VkResult vmaCreateAllocator(
    657  const VmaAllocatorCreateInfo* pCreateInfo,
    658  VmaAllocator* pAllocator);
    659 
    662  VmaAllocator allocator);
    663 
    669  VmaAllocator allocator,
    670  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
    671 
    677  VmaAllocator allocator,
    678  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
    679 
    687  VmaAllocator allocator,
    688  uint32_t memoryTypeIndex,
    689  VkMemoryPropertyFlags* pFlags);
    690 
    700  VmaAllocator allocator,
    701  uint32_t frameIndex);
    702 
    705 typedef struct VmaStatInfo
    706 {
    708  uint32_t blockCount;
    710  uint32_t allocationCount;
    714  VkDeviceSize usedBytes;
    716  VkDeviceSize unusedBytes;
    717  VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
    718  VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
    719 } VmaStatInfo;
    720 
    722 typedef struct VmaStats
    723 {
    724  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    725  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    727 } VmaStats;
    728 
    730 void vmaCalculateStats(
    731  VmaAllocator allocator,
    732  VmaStats* pStats);
    733 
    734 #define VMA_STATS_STRING_ENABLED 1
    735 
    736 #if VMA_STATS_STRING_ENABLED
    737 
    739 
    742  VmaAllocator allocator,
    743  char** ppStatsString,
    744  VkBool32 detailedMap);
    745 
    746 void vmaFreeStatsString(
    747  VmaAllocator allocator,
    748  char* pStatsString);
    749 
    750 #endif // #if VMA_STATS_STRING_ENABLED
    751 
    752 VK_DEFINE_HANDLE(VmaPool)
    753 
    754 typedef enum VmaMemoryUsage
    755 {
    761 
    764 
    767 
    771 
    786 
    831 
    834 typedef VkFlags VmaAllocationCreateFlags;
    835 
    837 {
    839  VmaAllocationCreateFlags flags;
    850  VkMemoryPropertyFlags requiredFlags;
    856  VkMemoryPropertyFlags preferredFlags;
    858  void* pUserData;
    863  VmaPool pool;
    865 
    880 VkResult vmaFindMemoryTypeIndex(
    881  VmaAllocator allocator,
    882  uint32_t memoryTypeBits,
    883  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    884  uint32_t* pMemoryTypeIndex);
    885 
    887 typedef enum VmaPoolCreateFlagBits {
    915 
    918 typedef VkFlags VmaPoolCreateFlags;
    919 
    922 typedef struct VmaPoolCreateInfo {
    925  uint32_t memoryTypeIndex;
    928  VmaPoolCreateFlags flags;
    933  VkDeviceSize blockSize;
    960  uint32_t frameInUseCount;
    962 
    965 typedef struct VmaPoolStats {
    968  VkDeviceSize size;
    971  VkDeviceSize unusedSize;
    984  VkDeviceSize unusedRangeSizeMax;
    985 } VmaPoolStats;
    986 
    993 VkResult vmaCreatePool(
    994  VmaAllocator allocator,
    995  const VmaPoolCreateInfo* pCreateInfo,
    996  VmaPool* pPool);
    997 
    1000 void vmaDestroyPool(
    1001  VmaAllocator allocator,
    1002  VmaPool pool);
    1003 
    1010 void vmaGetPoolStats(
    1011  VmaAllocator allocator,
    1012  VmaPool pool,
    1013  VmaPoolStats* pPoolStats);
    1014 
    1022  VmaAllocator allocator,
    1023  VmaPool pool,
    1024  size_t* pLostAllocationCount);
    1025 
    1026 VK_DEFINE_HANDLE(VmaAllocation)
    1027 
    1028 
    1030 typedef struct VmaAllocationInfo {
    1035  uint32_t memoryType;
    1044  VkDeviceMemory deviceMemory;
    1049  VkDeviceSize offset;
    1054  VkDeviceSize size;
    1065  void* pUserData;
    1067 
    1078 VkResult vmaAllocateMemory(
    1079  VmaAllocator allocator,
    1080  const VkMemoryRequirements* pVkMemoryRequirements,
    1081  const VmaAllocationCreateInfo* pCreateInfo,
    1082  VmaAllocation* pAllocation,
    1083  VmaAllocationInfo* pAllocationInfo);
    1084 
    1092  VmaAllocator allocator,
    1093  VkBuffer buffer,
    1094  const VmaAllocationCreateInfo* pCreateInfo,
    1095  VmaAllocation* pAllocation,
    1096  VmaAllocationInfo* pAllocationInfo);
    1097 
    1099 VkResult vmaAllocateMemoryForImage(
    1100  VmaAllocator allocator,
    1101  VkImage image,
    1102  const VmaAllocationCreateInfo* pCreateInfo,
    1103  VmaAllocation* pAllocation,
    1104  VmaAllocationInfo* pAllocationInfo);
    1105 
    1107 void vmaFreeMemory(
    1108  VmaAllocator allocator,
    1109  VmaAllocation allocation);
    1110 
    1113  VmaAllocator allocator,
    1114  VmaAllocation allocation,
    1115  VmaAllocationInfo* pAllocationInfo);
    1116 
    1119  VmaAllocator allocator,
    1120  VmaAllocation allocation,
    1121  void* pUserData);
    1122 
    1134  VmaAllocator allocator,
    1135  VmaAllocation* pAllocation);
    1136 
    1145 VkResult vmaMapMemory(
    1146  VmaAllocator allocator,
    1147  VmaAllocation allocation,
    1148  void** ppData);
    1149 
    1150 void vmaUnmapMemory(
    1151  VmaAllocator allocator,
    1152  VmaAllocation allocation);
    1153 
    1176 void vmaUnmapPersistentlyMappedMemory(VmaAllocator allocator);
    1177 
    1185 VkResult vmaMapPersistentlyMappedMemory(VmaAllocator allocator);
    1186 
    1188 typedef struct VmaDefragmentationInfo {
    1193  VkDeviceSize maxBytesToMove;
    1200 
    1202 typedef struct VmaDefragmentationStats {
    1204  VkDeviceSize bytesMoved;
    1206  VkDeviceSize bytesFreed;
    1212 
    1284 VkResult vmaDefragment(
    1285  VmaAllocator allocator,
    1286  VmaAllocation* pAllocations,
    1287  size_t allocationCount,
    1288  VkBool32* pAllocationsChanged,
    1289  const VmaDefragmentationInfo *pDefragmentationInfo,
    1290  VmaDefragmentationStats* pDefragmentationStats);
    1291 
    1310 VkResult vmaCreateBuffer(
    1311  VmaAllocator allocator,
    1312  const VkBufferCreateInfo* pBufferCreateInfo,
    1313  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    1314  VkBuffer* pBuffer,
    1315  VmaAllocation* pAllocation,
    1316  VmaAllocationInfo* pAllocationInfo);
    1317 
    1327 void vmaDestroyBuffer(
    1328  VmaAllocator allocator,
    1329  VkBuffer buffer,
    1330  VmaAllocation allocation);
    1331 
    1333 VkResult vmaCreateImage(
    1334  VmaAllocator allocator,
    1335  const VkImageCreateInfo* pImageCreateInfo,
    1336  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    1337  VkImage* pImage,
    1338  VmaAllocation* pAllocation,
    1339  VmaAllocationInfo* pAllocationInfo);
    1340 
    1350 void vmaDestroyImage(
    1351  VmaAllocator allocator,
    1352  VkImage image,
    1353  VmaAllocation allocation);
    1354 
    1355 #ifdef __cplusplus
    1356 }
    1357 #endif
    1358 
    1359 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
    1360 
    1361 // For Visual Studio IntelliSense.
    1362 #ifdef __INTELLISENSE__
    1363 #define VMA_IMPLEMENTATION
    1364 #endif
    1365 
    1366 #ifdef VMA_IMPLEMENTATION
    1367 #undef VMA_IMPLEMENTATION
    1368 
    1369 #include <cstdint>
    1370 #include <cstdlib>
    1371 #include <cstring>
    1372 
    1373 /*******************************************************************************
    1374 CONFIGURATION SECTION
    1375 
    1376 Define some of these macros before each #include of this header or change them
    1377 here if you need other than default behavior depending on your environment.
    1378 */
    1379 
    1380 /*
    1381 Define this macro to 1 to make the library fetch pointers to Vulkan functions
    1382 internally, like:
    1383 
    1384  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    1385 
    1386 Define to 0 if you are going to provide your own pointers to Vulkan functions via
    1387 VmaAllocatorCreateInfo::pVulkanFunctions.
    1388 */
    1389 #ifndef VMA_STATIC_VULKAN_FUNCTIONS
    1390 #define VMA_STATIC_VULKAN_FUNCTIONS 1
    1391 #endif
    1392 
    1393 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
    1394 //#define VMA_USE_STL_CONTAINERS 1
    1395 
    1396 /* Set this macro to 1 to make the library including and using STL containers:
    1397 std::pair, std::vector, std::list, std::unordered_map.
    1398 
    1399 Set it to 0 or undefined to make the library using its own implementation of
    1400 the containers.
    1401 */
    1402 #if VMA_USE_STL_CONTAINERS
    1403  #define VMA_USE_STL_VECTOR 1
    1404  #define VMA_USE_STL_UNORDERED_MAP 1
    1405  #define VMA_USE_STL_LIST 1
    1406 #endif
    1407 
    1408 #if VMA_USE_STL_VECTOR
    1409  #include <vector>
    1410 #endif
    1411 
    1412 #if VMA_USE_STL_UNORDERED_MAP
    1413  #include <unordered_map>
    1414 #endif
    1415 
    1416 #if VMA_USE_STL_LIST
    1417  #include <list>
    1418 #endif
    1419 
    1420 /*
    1421 Following headers are used in this CONFIGURATION section only, so feel free to
    1422 remove them if not needed.
    1423 */
    1424 #include <cassert> // for assert
    1425 #include <algorithm> // for min, max
    1426 #include <mutex> // for std::mutex
    1427 #include <atomic> // for std::atomic
    1428 
    1429 #if !defined(_WIN32)
    1430  #include <malloc.h> // for aligned_alloc()
    1431 #endif
    1432 
    1433 // Normal assert to check for programmer's errors, especially in Debug configuration.
    1434 #ifndef VMA_ASSERT
    1435  #ifdef _DEBUG
    1436  #define VMA_ASSERT(expr) assert(expr)
    1437  #else
    1438  #define VMA_ASSERT(expr)
    1439  #endif
    1440 #endif
    1441 
    1442 // Assert that will be called very often, like inside data structures e.g. operator[].
    1443 // Making it non-empty can make program slow.
    1444 #ifndef VMA_HEAVY_ASSERT
    1445  #ifdef _DEBUG
    1446  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    1447  #else
    1448  #define VMA_HEAVY_ASSERT(expr)
    1449  #endif
    1450 #endif
    1451 
    1452 #ifndef VMA_NULL
    1453  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    1454  #define VMA_NULL nullptr
    1455 #endif
    1456 
    1457 #ifndef VMA_ALIGN_OF
    1458  #define VMA_ALIGN_OF(type) (__alignof(type))
    1459 #endif
    1460 
    1461 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
    1462  #if defined(_WIN32)
    1463  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    1464  #else
    1465  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
    1466  #endif
    1467 #endif
    1468 
    1469 #ifndef VMA_SYSTEM_FREE
    1470  #if defined(_WIN32)
    1471  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    1472  #else
    1473  #define VMA_SYSTEM_FREE(ptr) free(ptr)
    1474  #endif
    1475 #endif
    1476 
    1477 #ifndef VMA_MIN
    1478  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
    1479 #endif
    1480 
    1481 #ifndef VMA_MAX
    1482  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
    1483 #endif
    1484 
    1485 #ifndef VMA_SWAP
    1486  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
    1487 #endif
    1488 
    1489 #ifndef VMA_SORT
    1490  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
    1491 #endif
    1492 
    1493 #ifndef VMA_DEBUG_LOG
    1494  #define VMA_DEBUG_LOG(format, ...)
    1495  /*
    1496  #define VMA_DEBUG_LOG(format, ...) do { \
    1497  printf(format, __VA_ARGS__); \
    1498  printf("\n"); \
    1499  } while(false)
    1500  */
    1501 #endif
    1502 
    1503 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
    1504 #if VMA_STATS_STRING_ENABLED
    1505  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    1506  {
    1507  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    1508  }
    1509  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    1510  {
    1511  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    1512  }
    1513  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    1514  {
    1515  snprintf(outStr, strLen, "%p", ptr);
    1516  }
    1517 #endif
    1518 
#ifndef VMA_MUTEX
 // Minimal mutex wrapper over std::mutex.
 // Define VMA_MUTEX before including this file to supply your own type
 // exposing Lock() and Unlock() with the same semantics.
 class VmaMutex
 {
 public:
     VmaMutex() { }
     ~VmaMutex() { }
     void Lock() { m_Mutex.lock(); }
     void Unlock() { m_Mutex.unlock(); }
 private:
     std::mutex m_Mutex;
 };
 #define VMA_MUTEX VmaMutex
#endif
    1532 
    1533 /*
    1534 If providing your own implementation, you need to implement a subset of std::atomic:
    1535 
    1536 - Constructor(uint32_t desired)
    1537 - uint32_t load() const
    1538 - void store(uint32_t desired)
    1539 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
    1540 */
    1541 #ifndef VMA_ATOMIC_UINT32
    1542  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
    1543 #endif
    1544 
    1545 #ifndef VMA_BEST_FIT
    1546 
    1558  #define VMA_BEST_FIT (1)
    1559 #endif
    1560 
    1561 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    1562 
    1566  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
    1567 #endif
    1568 
    1569 #ifndef VMA_DEBUG_ALIGNMENT
    1570 
    1574  #define VMA_DEBUG_ALIGNMENT (1)
    1575 #endif
    1576 
    1577 #ifndef VMA_DEBUG_MARGIN
    1578 
    1582  #define VMA_DEBUG_MARGIN (0)
    1583 #endif
    1584 
    1585 #ifndef VMA_DEBUG_GLOBAL_MUTEX
    1586 
    1590  #define VMA_DEBUG_GLOBAL_MUTEX (0)
    1591 #endif
    1592 
    1593 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    1594 
    1598  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
    1599 #endif
    1600 
    1601 #ifndef VMA_SMALL_HEAP_MAX_SIZE
    1602  #define VMA_SMALL_HEAP_MAX_SIZE (512 * 1024 * 1024)
    1604 #endif
    1605 
    1606 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    1607  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256 * 1024 * 1024)
    1609 #endif
    1610 
    1611 #ifndef VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE
    1612  #define VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE (64 * 1024 * 1024)
    1614 #endif
    1615 
    1616 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
    1617 
    1618 /*******************************************************************************
    1619 END OF CONFIGURATION
    1620 */
    1621 
    1622 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    1623  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
    1624 
    1625 // Returns number of bits set to 1 in (v).
    1626 static inline uint32_t CountBitsSet(uint32_t v)
    1627 {
    1628  uint32_t c = v - ((v >> 1) & 0x55555555);
    1629  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    1630  c = ((c >> 4) + c) & 0x0F0F0F0F;
    1631  c = ((c >> 8) + c) & 0x00FF00FF;
    1632  c = ((c >> 16) + c) & 0x0000FFFF;
    1633  return c;
    1634 }
    1635 
    1636 // Aligns given value up to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 16.
    1637 // Use types like uint32_t, uint64_t as T.
    1638 template <typename T>
    1639 static inline T VmaAlignUp(T val, T align)
    1640 {
    1641  return (val + align - 1) / align * align;
    1642 }
    1643 
    1644 // Division with mathematical rounding to nearest number.
    1645 template <typename T>
    1646 inline T VmaRoundDiv(T x, T y)
    1647 {
    1648  return (x + (y / (T)2)) / y;
    1649 }
    1650 
#ifndef VMA_SORT

// Lomuto-style partition: moves the pivot (last element of [beg, end)) into its
// final sorted position and returns an iterator to it. Helper for VmaQuickSort.
template<typename Iterator, typename Compare>
Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
{
    Iterator centerValue = end; --centerValue; // pivot = last element
    Iterator insertIndex = beg;                // boundary of the "less than pivot" prefix
    for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    {
        // Elements ordered before the pivot are swapped into the front section.
        if(cmp(*memTypeIndex, *centerValue))
        {
            if(insertIndex != memTypeIndex)
            {
                VMA_SWAP(*memTypeIndex, *insertIndex);
            }
            ++insertIndex;
        }
    }
    // Place the pivot right after the "less than pivot" prefix.
    if(insertIndex != centerValue)
    {
        VMA_SWAP(*insertIndex, *centerValue);
    }
    return insertIndex;
}

// Recursive quicksort over [beg, end) using cmp (strict weak ordering).
// Not a stable sort; pivot is always the last element.
template<typename Iterator, typename Compare>
void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
{
    if(beg < end)
    {
        Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
        VmaQuickSort<Iterator, Compare>(beg, it, cmp);
        VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    }
}

#define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)

#endif // #ifndef VMA_SORT
    1690 
/*
Returns true if two memory blocks occupy overlapping pages.
ResourceA must be at a lower memory offset than ResourceB.

Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".

NOTE(review): the mask rounding below assumes pageSize is a power of two —
true for bufferImageGranularity per the Vulkan spec; confirm for any other caller.
*/
static inline bool VmaBlocksOnSamePage(
    VkDeviceSize resourceAOffset,
    VkDeviceSize resourceASize,
    VkDeviceSize resourceBOffset,
    VkDeviceSize pageSize)
{
    VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;    // last byte of A
    VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);     // page holding A's last byte
    VkDeviceSize resourceBStart = resourceBOffset;                      // first byte of B
    VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1); // page holding B's first byte
    return resourceAEndPage == resourceBStartPage;
}
    1711 
// Kind of resource occupying a suballocation within a memory block. Used by
// VmaIsBufferImageGranularityConflict to decide whether two neighboring
// suballocations must respect bufferImageGranularity.
enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0,          // unused range
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,       // resource kind unknown — treated conservatively
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3, // image, tiling unknown
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF // forces 32-bit underlying type
};
    1722 
    1723 /*
    1724 Returns true if given suballocation types could conflict and must respect
    1725 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
    1726 or linear image and another one is optimal image. If type is unknown, behave
    1727 conservatively.
    1728 */
    1729 static inline bool VmaIsBufferImageGranularityConflict(
    1730  VmaSuballocationType suballocType1,
    1731  VmaSuballocationType suballocType2)
    1732 {
    1733  if(suballocType1 > suballocType2)
    1734  {
    1735  VMA_SWAP(suballocType1, suballocType2);
    1736  }
    1737 
    1738  switch(suballocType1)
    1739  {
    1740  case VMA_SUBALLOCATION_TYPE_FREE:
    1741  return false;
    1742  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
    1743  return true;
    1744  case VMA_SUBALLOCATION_TYPE_BUFFER:
    1745  return
    1746  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    1747  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    1748  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
    1749  return
    1750  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    1751  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
    1752  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    1753  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
    1754  return
    1755  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    1756  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
    1757  return false;
    1758  default:
    1759  VMA_ASSERT(0);
    1760  return true;
    1761  }
    1762 }
    1763 
    1764 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
    1765 struct VmaMutexLock
    1766 {
    1767 public:
    1768  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
    1769  m_pMutex(useMutex ? &mutex : VMA_NULL)
    1770  {
    1771  if(m_pMutex)
    1772  {
    1773  m_pMutex->Lock();
    1774  }
    1775  }
    1776 
    1777  ~VmaMutexLock()
    1778  {
    1779  if(m_pMutex)
    1780  {
    1781  m_pMutex->Unlock();
    1782  }
    1783  }
    1784 
    1785 private:
    1786  VMA_MUTEX* m_pMutex;
    1787 };
    1788 
    1789 #if VMA_DEBUG_GLOBAL_MUTEX
    1790  static VMA_MUTEX gDebugGlobalMutex;
    1791  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
    1792 #else
    1793  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
    1794 #endif
    1795 
    1796 // Minimum size of a free suballocation to register it in the free suballocation collection.
    1797 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
    1798 
    1799 /*
    1800 Performs binary search and returns iterator to first element that is greater or
    1801 equal to (key), according to comparison (cmp).
    1802 
    1803 Cmp should return true if first argument is less than second argument.
    1804 
    1805 Returned value is the found element, if present in the collection or place where
    1806 new element with value (key) should be inserted.
    1807 */
    1808 template <typename IterT, typename KeyT, typename CmpT>
    1809 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpT cmp)
    1810 {
    1811  size_t down = 0, up = (end - beg);
    1812  while(down < up)
    1813  {
    1814  const size_t mid = (down + up) / 2;
    1815  if(cmp(*(beg+mid), key))
    1816  {
    1817  down = mid + 1;
    1818  }
    1819  else
    1820  {
    1821  up = mid;
    1822  }
    1823  }
    1824  return beg + down;
    1825 }
    1826 
    1828 // Memory allocation
    1829 
    1830 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
    1831 {
    1832  if((pAllocationCallbacks != VMA_NULL) &&
    1833  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    1834  {
    1835  return (*pAllocationCallbacks->pfnAllocation)(
    1836  pAllocationCallbacks->pUserData,
    1837  size,
    1838  alignment,
    1839  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    1840  }
    1841  else
    1842  {
    1843  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    1844  }
    1845 }
    1846 
    1847 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
    1848 {
    1849  if((pAllocationCallbacks != VMA_NULL) &&
    1850  (pAllocationCallbacks->pfnFree != VMA_NULL))
    1851  {
    1852  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    1853  }
    1854  else
    1855  {
    1856  VMA_SYSTEM_FREE(ptr);
    1857  }
    1858 }
    1859 
    1860 template<typename T>
    1861 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
    1862 {
    1863  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
    1864 }
    1865 
    1866 template<typename T>
    1867 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
    1868 {
    1869  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
    1870 }
    1871 
    1872 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
    1873 
    1874 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
    1875 
    1876 template<typename T>
    1877 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
    1878 {
    1879  ptr->~T();
    1880  VmaFree(pAllocationCallbacks, ptr);
    1881 }
    1882 
    1883 template<typename T>
    1884 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
    1885 {
    1886  if(ptr != VMA_NULL)
    1887  {
    1888  for(size_t i = count; i--; )
    1889  {
    1890  ptr[i].~T();
    1891  }
    1892  VmaFree(pAllocationCallbacks, ptr);
    1893  }
    1894 }
    1895 
// STL-compatible allocator that routes all allocations through VmaAllocateArray /
// VmaFree, i.e. through the user-provided VkAllocationCallbacks when present.
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks; // may be null -> system allocator path
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    // Rebinding converting constructor required by the Allocator concept.
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    // Allocators compare equal iff they share the same callbacks, i.e. memory
    // allocated through one can be released through the other.
    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete; // const member -> non-assignable
};
    1923 
    1924 #if VMA_USE_STL_VECTOR
    1925 
    1926 #define VmaVector std::vector
    1927 
    1928 template<typename T, typename allocatorT>
    1929 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
    1930 {
    1931  vec.insert(vec.begin() + index, item);
    1932 }
    1933 
    1934 template<typename T, typename allocatorT>
    1935 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
    1936 {
    1937  vec.erase(vec.begin() + index);
    1938 }
    1939 
    1940 #else // #if VMA_USE_STL_VECTOR
    1941 
    1942 /* Class with interface compatible with subset of std::vector.
    1943 T must be POD because constructors and destructors are not called and memcpy is
    1944 used for these objects. */
    1945 template<typename T, typename AllocatorT>
    1946 class VmaVector
    1947 {
    1948 public:
    1949  typedef T value_type;
    1950 
    1951  VmaVector(const AllocatorT& allocator) :
    1952  m_Allocator(allocator),
    1953  m_pArray(VMA_NULL),
    1954  m_Count(0),
    1955  m_Capacity(0)
    1956  {
    1957  }
    1958 
    1959  VmaVector(size_t count, const AllocatorT& allocator) :
    1960  m_Allocator(allocator),
    1961  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
    1962  m_Count(count),
    1963  m_Capacity(count)
    1964  {
    1965  }
    1966 
    1967  VmaVector(const VmaVector<T, AllocatorT>& src) :
    1968  m_Allocator(src.m_Allocator),
    1969  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
    1970  m_Count(src.m_Count),
    1971  m_Capacity(src.m_Count)
    1972  {
    1973  if(m_Count != 0)
    1974  {
    1975  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
    1976  }
    1977  }
    1978 
    1979  ~VmaVector()
    1980  {
    1981  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    1982  }
    1983 
    1984  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    1985  {
    1986  if(&rhs != this)
    1987  {
    1988  resize(rhs.m_Count);
    1989  if(m_Count != 0)
    1990  {
    1991  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
    1992  }
    1993  }
    1994  return *this;
    1995  }
    1996 
    1997  bool empty() const { return m_Count == 0; }
    1998  size_t size() const { return m_Count; }
    1999  T* data() { return m_pArray; }
    2000  const T* data() const { return m_pArray; }
    2001 
    2002  T& operator[](size_t index)
    2003  {
    2004  VMA_HEAVY_ASSERT(index < m_Count);
    2005  return m_pArray[index];
    2006  }
    2007  const T& operator[](size_t index) const
    2008  {
    2009  VMA_HEAVY_ASSERT(index < m_Count);
    2010  return m_pArray[index];
    2011  }
    2012 
    2013  T& front()
    2014  {
    2015  VMA_HEAVY_ASSERT(m_Count > 0);
    2016  return m_pArray[0];
    2017  }
    2018  const T& front() const
    2019  {
    2020  VMA_HEAVY_ASSERT(m_Count > 0);
    2021  return m_pArray[0];
    2022  }
    2023  T& back()
    2024  {
    2025  VMA_HEAVY_ASSERT(m_Count > 0);
    2026  return m_pArray[m_Count - 1];
    2027  }
    2028  const T& back() const
    2029  {
    2030  VMA_HEAVY_ASSERT(m_Count > 0);
    2031  return m_pArray[m_Count - 1];
    2032  }
    2033 
    2034  void reserve(size_t newCapacity, bool freeMemory = false)
    2035  {
    2036  newCapacity = VMA_MAX(newCapacity, m_Count);
    2037 
    2038  if((newCapacity < m_Capacity) && !freeMemory)
    2039  {
    2040  newCapacity = m_Capacity;
    2041  }
    2042 
    2043  if(newCapacity != m_Capacity)
    2044  {
    2045  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
    2046  if(m_Count != 0)
    2047  {
    2048  memcpy(newArray, m_pArray, m_Count * sizeof(T));
    2049  }
    2050  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    2051  m_Capacity = newCapacity;
    2052  m_pArray = newArray;
    2053  }
    2054  }
    2055 
    2056  void resize(size_t newCount, bool freeMemory = false)
    2057  {
    2058  size_t newCapacity = m_Capacity;
    2059  if(newCount > m_Capacity)
    2060  {
    2061  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
    2062  }
    2063  else if(freeMemory)
    2064  {
    2065  newCapacity = newCount;
    2066  }
    2067 
    2068  if(newCapacity != m_Capacity)
    2069  {
    2070  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
    2071  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
    2072  if(elementsToCopy != 0)
    2073  {
    2074  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
    2075  }
    2076  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    2077  m_Capacity = newCapacity;
    2078  m_pArray = newArray;
    2079  }
    2080 
    2081  m_Count = newCount;
    2082  }
    2083 
    2084  void clear(bool freeMemory = false)
    2085  {
    2086  resize(0, freeMemory);
    2087  }
    2088 
    2089  void insert(size_t index, const T& src)
    2090  {
    2091  VMA_HEAVY_ASSERT(index <= m_Count);
    2092  const size_t oldCount = size();
    2093  resize(oldCount + 1);
    2094  if(index < oldCount)
    2095  {
    2096  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
    2097  }
    2098  m_pArray[index] = src;
    2099  }
    2100 
    2101  void remove(size_t index)
    2102  {
    2103  VMA_HEAVY_ASSERT(index < m_Count);
    2104  const size_t oldCount = size();
    2105  if(index < oldCount - 1)
    2106  {
    2107  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
    2108  }
    2109  resize(oldCount - 1);
    2110  }
    2111 
    2112  void push_back(const T& src)
    2113  {
    2114  const size_t newIndex = size();
    2115  resize(newIndex + 1);
    2116  m_pArray[newIndex] = src;
    2117  }
    2118 
    2119  void pop_back()
    2120  {
    2121  VMA_HEAVY_ASSERT(m_Count > 0);
    2122  resize(size() - 1);
    2123  }
    2124 
    2125  void push_front(const T& src)
    2126  {
    2127  insert(0, src);
    2128  }
    2129 
    2130  void pop_front()
    2131  {
    2132  VMA_HEAVY_ASSERT(m_Count > 0);
    2133  remove(0);
    2134  }
    2135 
    2136  typedef T* iterator;
    2137 
    2138  iterator begin() { return m_pArray; }
    2139  iterator end() { return m_pArray + m_Count; }
    2140 
    2141 private:
    2142  AllocatorT m_Allocator;
    2143  T* m_pArray;
    2144  size_t m_Count;
    2145  size_t m_Capacity;
    2146 };
    2147 
// Inserts item into a VmaVector at given index - same free-function API as
// the std::vector overload above.
template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}
    2153 
// Removes the element at given index from a VmaVector - same free-function
// API as the std::vector overload above.
template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}
    2159 
    2160 #endif // #if VMA_USE_STL_VECTOR
    2161 
    2162 template<typename CmpLess, typename VectorT>
    2163 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
    2164 {
    2165  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    2166  vector.data(),
    2167  vector.data() + vector.size(),
    2168  value,
    2169  CmpLess()) - vector.data();
    2170  VmaVectorInsert(vector, indexToInsert, value);
    2171  return indexToInsert;
    2172 }
    2173 
    2174 template<typename CmpLess, typename VectorT>
    2175 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
    2176 {
    2177  CmpLess comparator;
    2178  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
    2179  vector.begin(),
    2180  vector.end(),
    2181  value,
    2182  comparator);
    2183  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    2184  {
    2185  size_t indexToRemove = it - vector.begin();
    2186  VmaVectorRemove(vector, indexToRemove);
    2187  return true;
    2188  }
    2189  return false;
    2190 }
    2191 
    2192 template<typename CmpLess, typename VectorT>
    2193 size_t VmaVectorFindSorted(const VectorT& vector, const typename VectorT::value_type& value)
    2194 {
    2195  CmpLess comparator;
    2196  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
    2197  vector.data(),
    2198  vector.data() + vector.size(),
    2199  value,
    2200  comparator);
    2201  if(it != vector.size() && !comparator(*it, value) && !comparator(value, *it))
    2202  {
    2203  return it - vector.begin();
    2204  }
    2205  else
    2206  {
    2207  return vector.size();
    2208  }
    2209 }
    2210 
    2212 // class VmaPoolAllocator
    2213 
    2214 /*
    2215 Allocator for objects of type T using a list of arrays (pools) to speed up
    2216 allocation. Number of elements that can be allocated is not bounded because
    2217 allocator can create multiple blocks.
    2218 */
    2219 template<typename T>
    2220 class VmaPoolAllocator
    2221 {
    2222 public:
    2223  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    2224  ~VmaPoolAllocator();
    2225  void Clear();
    2226  T* Alloc();
    2227  void Free(T* ptr);
    2228 
    2229 private:
    2230  union Item
    2231  {
    2232  uint32_t NextFreeIndex;
    2233  T Value;
    2234  };
    2235 
    2236  struct ItemBlock
    2237  {
    2238  Item* pItems;
    2239  uint32_t FirstFreeIndex;
    2240  };
    2241 
    2242  const VkAllocationCallbacks* m_pAllocationCallbacks;
    2243  size_t m_ItemsPerBlock;
    2244  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
    2245 
    2246  ItemBlock& CreateNewBlock();
    2247 };
    2248 
// Creates a pool allocator; blocks of itemsPerBlock items are allocated lazily
// through pAllocationCallbacks on first demand.
template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemsPerBlock(itemsPerBlock),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    // A block must be able to hold at least one item.
    VMA_ASSERT(itemsPerBlock > 0);
}
    2257 
template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    // Releases all blocks, and with them every outstanding item.
    Clear();
}
    2263 
// Frees every block's item array and empties the block vector. Any pointer
// previously returned by Alloc() is invalidated.
template<typename T>
void VmaPoolAllocator<T>::Clear()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
        vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    m_ItemBlocks.clear();
}
    2271 
    2272 template<typename T>
    2273 T* VmaPoolAllocator<T>::Alloc()
    2274 {
    2275  for(size_t i = m_ItemBlocks.size(); i--; )
    2276  {
    2277  ItemBlock& block = m_ItemBlocks[i];
    2278  // This block has some free items: Use first one.
    2279  if(block.FirstFreeIndex != UINT32_MAX)
    2280  {
    2281  Item* const pItem = &block.pItems[block.FirstFreeIndex];
    2282  block.FirstFreeIndex = pItem->NextFreeIndex;
    2283  return &pItem->Value;
    2284  }
    2285  }
    2286 
    2287  // No block has free item: Create new one and use it.
    2288  ItemBlock& newBlock = CreateNewBlock();
    2289  Item* const pItem = &newBlock.pItems[0];
    2290  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    2291  return &pItem->Value;
    2292 }
    2293 
// Returns ptr to the pool by pushing its slot onto the owning block's free
// list. Asserts if ptr was not obtained from this allocator.
template<typename T>
void VmaPoolAllocator<T>::Free(T* ptr)
{
    // Search all memory blocks to find ptr.
    for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    {
        ItemBlock& block = m_ItemBlocks[i];

        // Casting to union: copy the pointer's bytes via memcpy instead of a
        // direct pointer cast, to avoid strict-aliasing issues.
        Item* pItemPtr;
        memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));

        // Check if pItemPtr is in address range of this block.
        if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
        {
            // Push the slot onto this block's free list.
            const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
            pItemPtr->NextFreeIndex = block.FirstFreeIndex;
            block.FirstFreeIndex = index;
            return;
        }
    }
    VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
}
    2317 
    2318 template<typename T>
    2319 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
    2320 {
    2321  ItemBlock newBlock = {
    2322  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
    2323 
    2324  m_ItemBlocks.push_back(newBlock);
    2325 
    2326  // Setup singly-linked list of all free items in this block.
    2327  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
    2328  newBlock.pItems[i].NextFreeIndex = i + 1;
    2329  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
    2330  return m_ItemBlocks.back();
    2331 }
    2332 
    2334 // class VmaRawList, VmaList
    2335 
    2336 #if VMA_USE_STL_LIST
    2337 
    2338 #define VmaList std::list
    2339 
    2340 #else // #if VMA_USE_STL_LIST
    2341 
// Node of the doubly linked list VmaRawList. Value is left uninitialized by
// the node-only Push/Insert overloads until assigned by the caller.
template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev;
    VmaListItem* pNext;
    T Value;
};
    2349 
// Doubly linked list.
// Nodes are obtained from an embedded VmaPoolAllocator, so individual
// insert/remove operations do not call the Vulkan allocation callbacks
// directly - only whole pool blocks are allocated/freed through them.
template<typename T>
class VmaRawList
{
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    // Append/prepend a node with uninitialized Value.
    ItemType* PushBack();
    ItemType* PushFront();
    // Append/prepend a node holding a copy of value.
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator; // Pool all list nodes come from.
    ItemType* m_pFront;
    ItemType* m_pBack;
    size_t m_Count;

    // Declared not defined, to block copy constructor and assignment operator.
    VmaRawList(const VmaRawList<T>& src);
    VmaRawList<T>& operator=(const VmaRawList<T>& rhs);
};
    2397 
// Creates an empty list whose nodes will be pooled through pAllocationCallbacks.
template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128), // 128 list nodes per pool block.
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}
    2407 
template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would be unnecessary
    // computations to return all items to m_ItemAllocator as free.
    // m_ItemAllocator's own destructor releases whole blocks at once.
}
    2414 
    2415 template<typename T>
    2416 void VmaRawList<T>::Clear()
    2417 {
    2418  if(IsEmpty() == false)
    2419  {
    2420  ItemType* pItem = m_pBack;
    2421  while(pItem != VMA_NULL)
    2422  {
    2423  ItemType* const pPrevItem = pItem->pPrev;
    2424  m_ItemAllocator.Free(pItem);
    2425  pItem = pPrevItem;
    2426  }
    2427  m_pFront = VMA_NULL;
    2428  m_pBack = VMA_NULL;
    2429  m_Count = 0;
    2430  }
    2431 }
    2432 
    2433 template<typename T>
    2434 VmaListItem<T>* VmaRawList<T>::PushBack()
    2435 {
    2436  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    2437  pNewItem->pNext = VMA_NULL;
    2438  if(IsEmpty())
    2439  {
    2440  pNewItem->pPrev = VMA_NULL;
    2441  m_pFront = pNewItem;
    2442  m_pBack = pNewItem;
    2443  m_Count = 1;
    2444  }
    2445  else
    2446  {
    2447  pNewItem->pPrev = m_pBack;
    2448  m_pBack->pNext = pNewItem;
    2449  m_pBack = pNewItem;
    2450  ++m_Count;
    2451  }
    2452  return pNewItem;
    2453 }
    2454 
    2455 template<typename T>
    2456 VmaListItem<T>* VmaRawList<T>::PushFront()
    2457 {
    2458  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    2459  pNewItem->pPrev = VMA_NULL;
    2460  if(IsEmpty())
    2461  {
    2462  pNewItem->pNext = VMA_NULL;
    2463  m_pFront = pNewItem;
    2464  m_pBack = pNewItem;
    2465  m_Count = 1;
    2466  }
    2467  else
    2468  {
    2469  pNewItem->pNext = m_pFront;
    2470  m_pFront->pPrev = pNewItem;
    2471  m_pFront = pNewItem;
    2472  ++m_Count;
    2473  }
    2474  return pNewItem;
    2475 }
    2476 
// Appends a node holding a copy of value; returns the new node.
template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
{
    ItemType* const pNewItem = PushBack();
    pNewItem->Value = value;
    return pNewItem;
}
    2484 
// Prepends a node holding a copy of value; returns the new node.
template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
{
    ItemType* const pNewItem = PushFront();
    pNewItem->Value = value;
    return pNewItem;
}
    2492 
    2493 template<typename T>
    2494 void VmaRawList<T>::PopBack()
    2495 {
    2496  VMA_HEAVY_ASSERT(m_Count > 0);
    2497  ItemType* const pBackItem = m_pBack;
    2498  ItemType* const pPrevItem = pBackItem->pPrev;
    2499  if(pPrevItem != VMA_NULL)
    2500  {
    2501  pPrevItem->pNext = VMA_NULL;
    2502  }
    2503  m_pBack = pPrevItem;
    2504  m_ItemAllocator.Free(pBackItem);
    2505  --m_Count;
    2506 }
    2507 
    2508 template<typename T>
    2509 void VmaRawList<T>::PopFront()
    2510 {
    2511  VMA_HEAVY_ASSERT(m_Count > 0);
    2512  ItemType* const pFrontItem = m_pFront;
    2513  ItemType* const pNextItem = pFrontItem->pNext;
    2514  if(pNextItem != VMA_NULL)
    2515  {
    2516  pNextItem->pPrev = VMA_NULL;
    2517  }
    2518  m_pFront = pNextItem;
    2519  m_ItemAllocator.Free(pFrontItem);
    2520  --m_Count;
    2521 }
    2522 
    2523 template<typename T>
    2524 void VmaRawList<T>::Remove(ItemType* pItem)
    2525 {
    2526  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    2527  VMA_HEAVY_ASSERT(m_Count > 0);
    2528 
    2529  if(pItem->pPrev != VMA_NULL)
    2530  {
    2531  pItem->pPrev->pNext = pItem->pNext;
    2532  }
    2533  else
    2534  {
    2535  VMA_HEAVY_ASSERT(m_pFront == pItem);
    2536  m_pFront = pItem->pNext;
    2537  }
    2538 
    2539  if(pItem->pNext != VMA_NULL)
    2540  {
    2541  pItem->pNext->pPrev = pItem->pPrev;
    2542  }
    2543  else
    2544  {
    2545  VMA_HEAVY_ASSERT(m_pBack == pItem);
    2546  m_pBack = pItem->pPrev;
    2547  }
    2548 
    2549  m_ItemAllocator.Free(pItem);
    2550  --m_Count;
    2551 }
    2552 
    2553 template<typename T>
    2554 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
    2555 {
    2556  if(pItem != VMA_NULL)
    2557  {
    2558  ItemType* const prevItem = pItem->pPrev;
    2559  ItemType* const newItem = m_ItemAllocator.Alloc();
    2560  newItem->pPrev = prevItem;
    2561  newItem->pNext = pItem;
    2562  pItem->pPrev = newItem;
    2563  if(prevItem != VMA_NULL)
    2564  {
    2565  prevItem->pNext = newItem;
    2566  }
    2567  else
    2568  {
    2569  VMA_HEAVY_ASSERT(m_pFront == pItem);
    2570  m_pFront = newItem;
    2571  }
    2572  ++m_Count;
    2573  return newItem;
    2574  }
    2575  else
    2576  return PushBack();
    2577 }
    2578 
    2579 template<typename T>
    2580 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
    2581 {
    2582  if(pItem != VMA_NULL)
    2583  {
    2584  ItemType* const nextItem = pItem->pNext;
    2585  ItemType* const newItem = m_ItemAllocator.Alloc();
    2586  newItem->pNext = nextItem;
    2587  newItem->pPrev = pItem;
    2588  pItem->pNext = newItem;
    2589  if(nextItem != VMA_NULL)
    2590  {
    2591  nextItem->pPrev = newItem;
    2592  }
    2593  else
    2594  {
    2595  VMA_HEAVY_ASSERT(m_pBack == pItem);
    2596  m_pBack = newItem;
    2597  }
    2598  ++m_Count;
    2599  return newItem;
    2600  }
    2601  else
    2602  return PushFront();
    2603 }
    2604 
// Inserts a node holding a copy of value before pItem (null = at the end).
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertBefore(pItem);
    newItem->Value = value;
    return newItem;
}
    2612 
// Inserts a node holding a copy of value after pItem (null = at the beginning).
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertAfter(pItem);
    newItem->Value = value;
    return newItem;
}
    2620 
    2621 template<typename T, typename AllocatorT>
    2622 class VmaList
    2623 {
    2624 public:
    2625  class iterator
    2626  {
    2627  public:
    2628  iterator() :
    2629  m_pList(VMA_NULL),
    2630  m_pItem(VMA_NULL)
    2631  {
    2632  }
    2633 
    2634  T& operator*() const
    2635  {
    2636  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    2637  return m_pItem->Value;
    2638  }
    2639  T* operator->() const
    2640  {
    2641  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    2642  return &m_pItem->Value;
    2643  }
    2644 
    2645  iterator& operator++()
    2646  {
    2647  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    2648  m_pItem = m_pItem->pNext;
    2649  return *this;
    2650  }
    2651  iterator& operator--()
    2652  {
    2653  if(m_pItem != VMA_NULL)
    2654  {
    2655  m_pItem = m_pItem->pPrev;
    2656  }
    2657  else
    2658  {
    2659  VMA_HEAVY_ASSERT(!m_pList.IsEmpty());
    2660  m_pItem = m_pList->Back();
    2661  }
    2662  return *this;
    2663  }
    2664 
    2665  iterator operator++(int)
    2666  {
    2667  iterator result = *this;
    2668  ++*this;
    2669  return result;
    2670  }
    2671  iterator operator--(int)
    2672  {
    2673  iterator result = *this;
    2674  --*this;
    2675  return result;
    2676  }
    2677 
    2678  bool operator==(const iterator& rhs) const
    2679  {
    2680  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    2681  return m_pItem == rhs.m_pItem;
    2682  }
    2683  bool operator!=(const iterator& rhs) const
    2684  {
    2685  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    2686  return m_pItem != rhs.m_pItem;
    2687  }
    2688 
    2689  private:
    2690  VmaRawList<T>* m_pList;
    2691  VmaListItem<T>* m_pItem;
    2692 
    2693  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
    2694  m_pList(pList),
    2695  m_pItem(pItem)
    2696  {
    2697  }
    2698 
    2699  friend class VmaList<T, AllocatorT>;
    2700  };
    2701 
    2702  class const_iterator
    2703  {
    2704  public:
    2705  const_iterator() :
    2706  m_pList(VMA_NULL),
    2707  m_pItem(VMA_NULL)
    2708  {
    2709  }
    2710 
    2711  const_iterator(const iterator& src) :
    2712  m_pList(src.m_pList),
    2713  m_pItem(src.m_pItem)
    2714  {
    2715  }
    2716 
    2717  const T& operator*() const
    2718  {
    2719  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    2720  return m_pItem->Value;
    2721  }
    2722  const T* operator->() const
    2723  {
    2724  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    2725  return &m_pItem->Value;
    2726  }
    2727 
    2728  const_iterator& operator++()
    2729  {
    2730  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    2731  m_pItem = m_pItem->pNext;
    2732  return *this;
    2733  }
    2734  const_iterator& operator--()
    2735  {
    2736  if(m_pItem != VMA_NULL)
    2737  {
    2738  m_pItem = m_pItem->pPrev;
    2739  }
    2740  else
    2741  {
    2742  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
    2743  m_pItem = m_pList->Back();
    2744  }
    2745  return *this;
    2746  }
    2747 
    2748  const_iterator operator++(int)
    2749  {
    2750  const_iterator result = *this;
    2751  ++*this;
    2752  return result;
    2753  }
    2754  const_iterator operator--(int)
    2755  {
    2756  const_iterator result = *this;
    2757  --*this;
    2758  return result;
    2759  }
    2760 
    2761  bool operator==(const const_iterator& rhs) const
    2762  {
    2763  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    2764  return m_pItem == rhs.m_pItem;
    2765  }
    2766  bool operator!=(const const_iterator& rhs) const
    2767  {
    2768  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    2769  return m_pItem != rhs.m_pItem;
    2770  }
    2771 
    2772  private:
    2773  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
    2774  m_pList(pList),
    2775  m_pItem(pItem)
    2776  {
    2777  }
    2778 
    2779  const VmaRawList<T>* m_pList;
    2780  const VmaListItem<T>* m_pItem;
    2781 
    2782  friend class VmaList<T, AllocatorT>;
    2783  };
    2784 
    2785  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
    2786 
    2787  bool empty() const { return m_RawList.IsEmpty(); }
    2788  size_t size() const { return m_RawList.GetCount(); }
    2789 
    2790  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    2791  iterator end() { return iterator(&m_RawList, VMA_NULL); }
    2792 
    2793  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    2794  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
    2795 
    2796  void clear() { m_RawList.Clear(); }
    2797  void push_back(const T& value) { m_RawList.PushBack(value); }
    2798  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    2799  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
    2800 
    2801 private:
    2802  VmaRawList<T> m_RawList;
    2803 };
    2804 
    2805 #endif // #if VMA_USE_STL_LIST
    2806 
    2808 // class VmaMap
    2809 
    2810 // Unused in this version.
    2811 #if 0
    2812 
    2813 #if VMA_USE_STL_UNORDERED_MAP
    2814 
    2815 #define VmaPair std::pair
    2816 
    2817 #define VMA_MAP_TYPE(KeyT, ValueT) \
    2818  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
    2819 
    2820 #else // #if VMA_USE_STL_UNORDERED_MAP
    2821 
// Minimal replacement for std::pair. Part of the VmaMap section that is
// currently compiled out (#if 0, "Unused in this version").
template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};
    2831 
/* Class compatible with subset of interface of std::unordered_map.
KeyT, ValueT must be POD because they will be stored in VmaVector.
Backed by a VmaVector kept sorted by key, so lookup is binary search rather
than hashing. Currently compiled out (#if 0, "Unused in this version").
*/
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    typedef PairType* iterator;

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    void insert(const PairType& pair);
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    // Pairs kept sorted by first member (see VmaPairFirstLess).
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};
    2854 
    2855 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
    2856 
// Comparator ordering VmaPair objects by their first member. The second
// overload lets binary search compare a stored pair directly against a key.
template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};
    2869 
// Inserts pair at the position that keeps m_Vector sorted by key
// (binary search for the insertion index). Compiled out (#if 0).
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        pair,
        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    VmaVectorInsert(m_Vector, indexToInsert, pair);
}
    2880 
// Returns an iterator to the pair with the given key, or end() when absent.
// Compiled out (#if 0).
template<typename KeyT, typename ValueT>
VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
{
    // Binary search for the first pair whose key is not less than key.
    PairType* it = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        key,
        VmaPairFirstLess<KeyT, ValueT>());
    if((it != m_Vector.end()) && (it->first == key))
    {
        return it;
    }
    else
    {
        return m_Vector.end();
    }
}
    2898 
// Removes the pair that it points to. Compiled out (#if 0).
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}
    2904 
    2905 #endif // #if VMA_USE_STL_UNORDERED_MAP
    2906 
    2907 #endif // #if 0
    2908 
    2910 
    2911 class VmaDeviceMemoryBlock;
    2912 
// Distinguishes the two kinds of block vectors an allocation can live in:
// persistently mapped blocks are kept separate from unmapped ones.
enum VMA_BLOCK_VECTOR_TYPE
{
    VMA_BLOCK_VECTOR_TYPE_UNMAPPED,
    VMA_BLOCK_VECTOR_TYPE_MAPPED,
    VMA_BLOCK_VECTOR_TYPE_COUNT // Number of valid values; not a real type.
};
    2919 
    2920 static VMA_BLOCK_VECTOR_TYPE VmaAllocationCreateFlagsToBlockVectorType(VmaAllocationCreateFlags flags)
    2921 {
    2922  return (flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0 ?
    2923  VMA_BLOCK_VECTOR_TYPE_MAPPED :
    2924  VMA_BLOCK_VECTOR_TYPE_UNMAPPED;
    2925 }
    2926 
    2927 struct VmaAllocation_T
    2928 {
    2929 public:
    2930  enum ALLOCATION_TYPE
    2931  {
    2932  ALLOCATION_TYPE_NONE,
    2933  ALLOCATION_TYPE_BLOCK,
    2934  ALLOCATION_TYPE_DEDICATED,
    2935  };
    2936 
    2937  VmaAllocation_T(uint32_t currentFrameIndex) :
    2938  m_Alignment(1),
    2939  m_Size(0),
    2940  m_pUserData(VMA_NULL),
    2941  m_Type(ALLOCATION_TYPE_NONE),
    2942  m_SuballocationType(VMA_SUBALLOCATION_TYPE_UNKNOWN),
    2943  m_LastUseFrameIndex(currentFrameIndex)
    2944  {
    2945  }
    2946 
    2947  void InitBlockAllocation(
    2948  VmaPool hPool,
    2949  VmaDeviceMemoryBlock* block,
    2950  VkDeviceSize offset,
    2951  VkDeviceSize alignment,
    2952  VkDeviceSize size,
    2953  VmaSuballocationType suballocationType,
    2954  void* pUserData,
    2955  bool canBecomeLost)
    2956  {
    2957  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
    2958  VMA_ASSERT(block != VMA_NULL);
    2959  m_Type = ALLOCATION_TYPE_BLOCK;
    2960  m_Alignment = alignment;
    2961  m_Size = size;
    2962  m_pUserData = pUserData;
    2963  m_SuballocationType = suballocationType;
    2964  m_BlockAllocation.m_hPool = hPool;
    2965  m_BlockAllocation.m_Block = block;
    2966  m_BlockAllocation.m_Offset = offset;
    2967  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    2968  }
    2969 
    2970  void InitLost()
    2971  {
    2972  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
    2973  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
    2974  m_Type = ALLOCATION_TYPE_BLOCK;
    2975  m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
    2976  m_BlockAllocation.m_Block = VMA_NULL;
    2977  m_BlockAllocation.m_Offset = 0;
    2978  m_BlockAllocation.m_CanBecomeLost = true;
    2979  }
    2980 
    2981  void ChangeBlockAllocation(
    2982  VmaDeviceMemoryBlock* block,
    2983  VkDeviceSize offset)
    2984  {
    2985  VMA_ASSERT(block != VMA_NULL);
    2986  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    2987  m_BlockAllocation.m_Block = block;
    2988  m_BlockAllocation.m_Offset = offset;
    2989  }
    2990 
    // Initializes this object as a dedicated allocation - one that owns a whole
    // VkDeviceMemory (hMemory) instead of being a suballocation of a block.
    // Alignment is meaningless for a whole VkDeviceMemory object, hence 0.
    void InitDedicatedAllocation(
        uint32_t memoryTypeIndex,
        VkDeviceMemory hMemory,
        VmaSuballocationType suballocationType,
        bool persistentMap,
        void* pMappedData,
        VkDeviceSize size,
        void* pUserData)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(hMemory != VK_NULL_HANDLE);
        m_Type = ALLOCATION_TYPE_DEDICATED;
        m_Alignment = 0;
        m_Size = size;
        m_pUserData = pUserData;
        m_SuballocationType = suballocationType;
        m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
        m_DedicatedAllocation.m_hMemory = hMemory;
        m_DedicatedAllocation.m_PersistentMap = persistentMap;
        m_DedicatedAllocation.m_pMappedData = pMappedData;
    }
    3012 
    // Simple accessors for properties common to both allocation types.
    ALLOCATION_TYPE GetType() const { return m_Type; }
    VkDeviceSize GetAlignment() const { return m_Alignment; }
    VkDeviceSize GetSize() const { return m_Size; }
    void* GetUserData() const { return m_pUserData; }
    void SetUserData(void* pUserData) { m_pUserData = pUserData; }
    VmaSuballocationType GetSuballocationType() const { return m_SuballocationType; }

    // Returns owning block. Valid only for ALLOCATION_TYPE_BLOCK.
    VmaDeviceMemoryBlock* GetBlock() const
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        return m_BlockAllocation.m_Block;
    }
    // Accessors whose result depends on allocation type; defined out of line.
    VkDeviceSize GetOffset() const;
    VkDeviceMemory GetMemory() const;
    uint32_t GetMemoryTypeIndex() const;
    VMA_BLOCK_VECTOR_TYPE GetBlockVectorType() const;
    void* GetMappedData() const;
    bool CanBecomeLost() const;
    VmaPool GetPool() const;

    // (Un)maps memory of a persistently mapped dedicated allocation.
    VkResult DedicatedAllocMapPersistentlyMappedMemory(VmaAllocator hAllocator);
    void DedicatedAllocUnmapPersistentlyMappedMemory(VmaAllocator hAllocator);

    // Atomic access to the frame index in which this allocation was last used.
    uint32_t GetLastUseFrameIndex() const
    {
        return m_LastUseFrameIndex.load();
    }
    bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    {
        return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    }
    /*
    - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
      makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    - Else, returns false.

    If hAllocation is already lost, assert - you should not call it then.
    If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    */
    bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Fills outInfo with statistics describing this single dedicated allocation
    // as if it were a whole block: 1 block, 1 allocation, no unused ranges.
    void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
        outInfo.blockCount = 1;
        outInfo.allocationCount = 1;
        outInfo.unusedRangeCount = 0;
        outInfo.usedBytes = m_Size;
        outInfo.unusedBytes = 0;
        outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
        // No unused ranges: min stays at the "empty" sentinel, max at 0.
        outInfo.unusedRangeSizeMin = UINT64_MAX;
        outInfo.unusedRangeSizeMax = 0;
    }
    3066 
private:
    VkDeviceSize m_Alignment;
    VkDeviceSize m_Size;
    void* m_pUserData;
    // Discriminator for the anonymous union below.
    ALLOCATION_TYPE m_Type;
    VmaSuballocationType m_SuballocationType;
    // Frame index in which this allocation was last used, or VMA_FRAME_INDEX_LOST.
    VMA_ATOMIC_UINT32 m_LastUseFrameIndex;

    // Allocation out of VmaDeviceMemoryBlock.
    struct BlockAllocation
    {
        VmaPool m_hPool; // Null if belongs to general memory.
        VmaDeviceMemoryBlock* m_Block;
        VkDeviceSize m_Offset;
        bool m_CanBecomeLost;
    };

    // Allocation for an object that has its own private VkDeviceMemory.
    struct DedicatedAllocation
    {
        uint32_t m_MemoryTypeIndex;
        VkDeviceMemory m_hMemory;
        bool m_PersistentMap;
        void* m_pMappedData;
    };

    // Exactly one member is active, selected by m_Type.
    union
    {
        // Allocation out of VmaDeviceMemoryBlock.
        BlockAllocation m_BlockAllocation;
        // Allocation for an object that has its own private VkDeviceMemory.
        DedicatedAllocation m_DedicatedAllocation;
    };
    3100 };
    3101 
/*
Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
allocated memory block or free.
*/
struct VmaSuballocation
{
    VkDeviceSize offset; // Offset of this region from the start of the block.
    VkDeviceSize size;   // Size of this region in bytes.
    // NOTE(review): presumably null when the region is free - confirm at usage sites.
    VmaAllocation hAllocation;
    VmaSuballocationType type;
};
    3113 
// List of suballocations covering a whole memory block, in offset order.
typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;

// Cost of one additional allocation lost, as equivalent in bytes.
static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
    3118 
/*
Parameters of planned allocation inside a VmaDeviceMemoryBlock.

If canMakeOtherLost was false:
- item points to a FREE suballocation.
- itemsToMakeLostCount is 0.

If canMakeOtherLost was true:
- item points to first of sequence of suballocations, which are either FREE,
  or point to VmaAllocations that can become lost.
- itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
  the requested allocation to succeed.
*/
struct VmaAllocationRequest
{
    VkDeviceSize offset;
    VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
    VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
    VmaSuballocationList::iterator item;
    size_t itemsToMakeLostCount;

    // Heuristic cost of fulfilling this request: bytes of live allocations
    // sacrificed plus a fixed per-lost-allocation penalty.
    VkDeviceSize CalcCost() const
    {
        return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    }
};
    3145 
/*
Data structure used for bookkeeping of allocations and unused ranges of memory
in a single VkDeviceMemory block.
*/
class VmaBlockMetadata
{
public:
    VmaBlockMetadata(VmaAllocator hAllocator);
    ~VmaBlockMetadata();
    // Always call after construction, before any other use.
    void Init(VkDeviceSize size);

    // Validates all data structures inside this object. If not valid, returns false.
    bool Validate() const;
    VkDeviceSize GetSize() const { return m_Size; }
    // Number of live (non-free) suballocations.
    size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
    VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    VkDeviceSize GetUnusedRangeSizeMax() const;
    // Returns true if this block is empty - contains only single free suballocation.
    bool IsEmpty() const;

    void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    // Creates trivial request for case when block is empty.
    void CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest);

    // Tries to find a place for suballocation with given parameters inside this block.
    // If succeeded, fills pAllocationRequest and returns true.
    // If failed, returns false.
    bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        VmaAllocationRequest* pAllocationRequest);

    bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Makes actual allocation based on request. Request must already be checked and valid.
    void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        VmaAllocation hAllocation);

    // Frees suballocation assigned to given memory region.
    void Free(const VmaAllocation allocation);

private:
    VkDeviceSize m_Size;
    // Number of FREE suballocations in m_Suballocations.
    uint32_t m_FreeCount;
    // Sum of sizes of all FREE suballocations, kept up to date for fast queries.
    VkDeviceSize m_SumFreeSize;
    VmaSuballocationList m_Suballocations;
    // Suballocations that are free and have size greater than certain threshold.
    // Sorted by size, ascending.
    VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;

    bool ValidateFreeSuballocationList() const;

    // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
    // If yes, fills pOffset and returns true. If no, returns false.
    bool CheckAllocation(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        VmaSuballocationList::const_iterator suballocItem,
        bool canMakeOtherLost,
        VkDeviceSize* pOffset,
        size_t* itemsToMakeLostCount,
        VkDeviceSize* pSumFreeSize,
        VkDeviceSize* pSumItemSize) const;
    // Given free suballocation, it merges it with following one, which must also be free.
    void MergeFreeWithNext(VmaSuballocationList::iterator item);
    // Releases given suballocation, making it free.
    // Merges it with adjacent free suballocations if applicable.
    // Returns iterator to new free suballocation at this place.
    VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    // Given free suballocation, it inserts it into sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    // Given free suballocation, it removes it from sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
};
    3245 
/*
Represents a single block of device memory (`VkDeviceMemory`) with all the
data about its regions (aka suballocations, `VmaAllocation`), assigned and free.

Thread-safety: This class must be externally synchronized.
*/
class VmaDeviceMemoryBlock
{
public:
    uint32_t m_MemoryTypeIndex;
    VMA_BLOCK_VECTOR_TYPE m_BlockVectorType;
    VkDeviceMemory m_hMemory;
    bool m_PersistentMap;
    void* m_pMappedData;
    // Bookkeeping of suballocations inside this block.
    VmaBlockMetadata m_Metadata;

    VmaDeviceMemoryBlock(VmaAllocator hAllocator);

    ~VmaDeviceMemoryBlock()
    {
        // Destroy() must have been called first, releasing m_hMemory.
        VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    }

    // Always call after construction.
    void Init(
        uint32_t newMemoryTypeIndex,
        VMA_BLOCK_VECTOR_TYPE newBlockVectorType,
        VkDeviceMemory newMemory,
        VkDeviceSize newSize,
        bool persistentMap,
        void* pMappedData);
    // Always call before destruction.
    void Destroy(VmaAllocator allocator);

    // Validates all data structures inside this object. If not valid, returns false.
    bool Validate() const;
};
    3283 
    3284 struct VmaPointerLess
    3285 {
    3286  bool operator()(const void* lhs, const void* rhs) const
    3287  {
    3288  return lhs < rhs;
    3289  }
    3290 };
    3291 
class VmaDefragmentator;

/*
Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
Vulkan memory type.

Synchronized internally with a mutex.
*/
struct VmaBlockVector
{
    VmaBlockVector(
        VmaAllocator hAllocator,
        uint32_t memoryTypeIndex,
        VMA_BLOCK_VECTOR_TYPE blockVectorType,
        VkDeviceSize preferredBlockSize,
        size_t minBlockCount,
        size_t maxBlockCount,
        VkDeviceSize bufferImageGranularity,
        uint32_t frameInUseCount,
        bool isCustomPool);
    ~VmaBlockVector();

    // Pre-creates blocks up to the minimum count (see m_MinBlockCount).
    VkResult CreateMinBlocks();

    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
    VMA_BLOCK_VECTOR_TYPE GetBlockVectorType() const { return m_BlockVectorType; }

    void GetPoolStats(VmaPoolStats* pStats);

    bool IsEmpty() const { return m_Blocks.empty(); }

    VkResult Allocate(
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        const VkMemoryRequirements& vkMemReq,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    void Free(
        VmaAllocation hAllocation);

    // Adds statistics of this BlockVector to pStats.
    void AddStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    void UnmapPersistentlyMappedMemory();
    VkResult MapPersistentlyMappedMemory();

    void MakePoolAllocationsLost(
        uint32_t currentFrameIndex,
        size_t* pLostAllocationCount);

    // Returns the defragmentator for this vector, creating it on first use.
    VmaDefragmentator* EnsureDefragmentator(
        VmaAllocator hAllocator,
        uint32_t currentFrameIndex);

    VkResult Defragment(
        VmaDefragmentationStats* pDefragmentationStats,
        VkDeviceSize& maxBytesToMove,
        uint32_t& maxAllocationsToMove);

    void DestroyDefragmentator();

private:
    friend class VmaDefragmentator;

    const VmaAllocator m_hAllocator;
    const uint32_t m_MemoryTypeIndex;
    const VMA_BLOCK_VECTOR_TYPE m_BlockVectorType;
    const VkDeviceSize m_PreferredBlockSize;
    const size_t m_MinBlockCount;
    const size_t m_MaxBlockCount;
    const VkDeviceSize m_BufferImageGranularity;
    const uint32_t m_FrameInUseCount;
    const bool m_IsCustomPool;
    VMA_MUTEX m_Mutex;
    // Incrementally sorted by sumFreeSize, ascending.
    VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
    /* There can be at most one allocation that is completely empty - a
    hysteresis to avoid pessimistic case of alternating creation and destruction
    of a VkDeviceMemory. */
    bool m_HasEmptyBlock;
    VmaDefragmentator* m_pDefragmentator;

    // Finds and removes given block from vector.
    void Remove(VmaDeviceMemoryBlock* pBlock);

    // Performs single step in sorting m_Blocks. They may not be fully sorted
    // after this call.
    void IncrementallySortBlocks();

    VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
};
    3392 
// Implementation of the VmaPool handle: a custom memory pool,
// which is a thin wrapper over a single VmaBlockVector.
struct VmaPool_T
{
public:
    VmaBlockVector m_BlockVector;

    // Takes ownership.
    VmaPool_T(
        VmaAllocator hAllocator,
        const VmaPoolCreateInfo& createInfo);
    ~VmaPool_T();

    VmaBlockVector& GetBlockVector() { return m_BlockVector; }

#if VMA_STATS_STRING_ENABLED
    //void PrintDetailedMap(class VmaStringBuilder& sb);
#endif
};
    3410 
// Performs defragmentation of allocations within a single VmaBlockVector,
// moving registered allocations between blocks within given byte/count limits.
class VmaDefragmentator
{
    const VmaAllocator m_hAllocator;
    VmaBlockVector* const m_pBlockVector;
    uint32_t m_CurrentFrameIndex;
    VMA_BLOCK_VECTOR_TYPE m_BlockVectorType;
    // Running totals reported via GetBytesMoved() / GetAllocationsMoved().
    VkDeviceSize m_BytesMoved;
    uint32_t m_AllocationsMoved;

    // A single allocation registered for defragmentation, with optional
    // per-allocation output flag (m_pChanged).
    struct AllocationInfo
    {
        VmaAllocation m_hAllocation;
        VkBool32* m_pChanged;

        AllocationInfo() :
            m_hAllocation(VK_NULL_HANDLE),
            m_pChanged(VMA_NULL)
        {
        }
    };

    // Orders AllocationInfo by allocation size, descending.
    struct AllocationInfoSizeGreater
    {
        bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
        {
            return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
        }
    };

    // Used between AddAllocation and Defragment.
    VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

    // Per-block working state for a defragmentation pass.
    struct BlockInfo
    {
        VmaDeviceMemoryBlock* m_pBlock;
        bool m_HasNonMovableAllocations;
        VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

        BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
            m_pBlock(VMA_NULL),
            m_HasNonMovableAllocations(true),
            m_Allocations(pAllocationCallbacks),
            m_pMappedDataForDefragmentation(VMA_NULL)
        {
        }

        // A block has non-movable allocations if not all of its allocations
        // were registered for defragmentation via AddAllocation.
        void CalcHasNonMovableAllocations()
        {
            const size_t blockAllocCount = m_pBlock->m_Metadata.GetAllocationCount();
            const size_t defragmentAllocCount = m_Allocations.size();
            m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
        }

        // Note: method name contains a typo ("Descecnding"); kept for compatibility.
        void SortAllocationsBySizeDescecnding()
        {
            VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
        }

        VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
        void Unmap(VmaAllocator hAllocator);

    private:
        // Not null if mapped for defragmentation only, not persistently mapped.
        void* m_pMappedDataForDefragmentation;
    };

    // Orders BlockInfo* by the address of the underlying block; also comparable
    // against a raw VmaDeviceMemoryBlock* for lookups.
    struct BlockPointerLess
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlock;
        }
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
        }
    };

    // 1. Blocks with some non-movable allocations go first.
    // 2. Blocks with smaller sumFreeSize go first.
    struct BlockInfoCompareMoveDestination
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return true;
            }
            if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return false;
            }
            if(pLhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize())
            {
                return true;
            }
            return false;
        }
    };

    typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
    BlockInfoVector m_Blocks;

    VkResult DefragmentRound(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);

    static bool MoveMakesSense(
        size_t dstBlockIndex, VkDeviceSize dstOffset,
        size_t srcBlockIndex, VkDeviceSize srcOffset);

public:
    VmaDefragmentator(
        VmaAllocator hAllocator,
        VmaBlockVector* pBlockVector,
        uint32_t currentFrameIndex);

    ~VmaDefragmentator();

    VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }

    // Registers an allocation as a candidate for moving. pChanged, if not null,
    // presumably receives whether the allocation was moved - see DefragmentRound.
    void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);

    VkResult Defragment(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);
};
    3539 
// Main allocator object - implementation of the VmaAllocator handle.
struct VmaAllocator_T
{
    bool m_UseMutex;
    // True when VK_KHR_dedicated_allocation usage was requested at creation
    // (VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT).
    bool m_UseKhrDedicatedAllocation;
    VkDevice m_hDevice;
    bool m_AllocationCallbacksSpecified;
    VkAllocationCallbacks m_AllocationCallbacks;
    VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
    // Non-zero when we are inside UnmapPersistentlyMappedMemory...MapPersistentlyMappedMemory.
    // Counter to allow nested calls to these functions.
    uint32_t m_UnmapPersistentlyMappedMemoryCounter;

    // Number of bytes free out of limit, or VK_WHOLE_SIZE if there is no limit for that heap.
    VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
    VMA_MUTEX m_HeapSizeLimitMutex;

    VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
    VkPhysicalDeviceMemoryProperties m_MemProps;

    // Default pools.
    VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES][VMA_BLOCK_VECTOR_TYPE_COUNT];

    // Each vector is sorted by memory (handle value).
    typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
    AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES][VMA_BLOCK_VECTOR_TYPE_COUNT];
    VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];

    VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
    ~VmaAllocator_T();

    // Returns user-provided CPU allocation callbacks, or null to use defaults.
    const VkAllocationCallbacks* GetAllocationCallbacks() const
    {
        return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
    }
    const VmaVulkanFunctions& GetVulkanFunctions() const
    {
        return m_VulkanFunctions;
    }

    // Effective bufferImageGranularity: the device limit, but never less than
    // the debug minimum VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY.
    VkDeviceSize GetBufferImageGranularity() const
    {
        return VMA_MAX(
            static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
            m_PhysicalDeviceProperties.limits.bufferImageGranularity);
    }

    uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
    uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }

    uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
    {
        VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
        return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
    }

    // Queries memory requirements for a buffer/image, including whether a
    // dedicated allocation is required or preferred (see m_UseKhrDedicatedAllocation).
    void GetBufferMemoryRequirements(
        VkBuffer hBuffer,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;
    void GetImageMemoryRequirements(
        VkImage hImage,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;

    // Main allocation function.
    VkResult AllocateMemory(
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // Main deallocation function.
    void FreeMemory(const VmaAllocation allocation);

    void CalculateStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    void UnmapPersistentlyMappedMemory();
    VkResult MapPersistentlyMappedMemory();

    VkResult Defragment(
        VmaAllocation* pAllocations,
        size_t allocationCount,
        VkBool32* pAllocationsChanged,
        const VmaDefragmentationInfo* pDefragmentationInfo,
        VmaDefragmentationStats* pDefragmentationStats);

    void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);

    VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
    void DestroyPool(VmaPool pool);
    void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);

    void SetCurrentFrameIndex(uint32_t frameIndex);

    void MakePoolAllocationsLost(
        VmaPool hPool,
        size_t* pLostAllocationCount);

    void CreateLostAllocation(VmaAllocation* pAllocation);

    // Low-level wrappers over vkAllocateMemory/vkFreeMemory (see also m_HeapSizeLimit).
    VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
    void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);

private:
    VkDeviceSize m_PreferredLargeHeapBlockSize;
    VkDeviceSize m_PreferredSmallHeapBlockSize;

    VkPhysicalDevice m_PhysicalDevice;
    VMA_ATOMIC_UINT32 m_CurrentFrameIndex;

    VMA_MUTEX m_PoolsMutex;
    // Protected by m_PoolsMutex. Sorted by pointer value.
    VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;

    VmaVulkanFunctions m_VulkanFunctions;

    void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);

    VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);

    VkResult AllocateMemoryOfType(
        const VkMemoryRequirements& vkMemReq,
        bool dedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        uint32_t memTypeIndex,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // Allocates and registers new VkDeviceMemory specifically for single allocation.
    VkResult AllocateDedicatedMemory(
        VkDeviceSize size,
        VmaSuballocationType suballocType,
        uint32_t memTypeIndex,
        bool map,
        void* pUserData,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        VmaAllocation* pAllocation);

    // Tries to free pMemory as Dedicated Memory. Returns true if found and freed.
    void FreeDedicatedMemory(VmaAllocation allocation);
};
    3695 
// Memory allocation #2 after VmaAllocator_T definition

// Allocates host memory using the allocator's VkAllocationCallbacks
// (overload of the earlier VmaMalloc taking callbacks directly).
static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
{
    return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
}
    3703 
// Frees host memory previously obtained from VmaMalloc(hAllocator, ...).
static void VmaFree(VmaAllocator hAllocator, void* ptr)
{
    VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
}
    3708 
// Allocates raw, uninitialized storage for a single T (no constructor is run).
template<typename T>
static T* VmaAllocate(VmaAllocator hAllocator)
{
    return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
}
    3714 
// Allocates raw, uninitialized storage for 'count' objects of type T
// (no constructors are run).
template<typename T>
static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
{
    return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
}
    3720 
    3721 template<typename T>
    3722 static void vma_delete(VmaAllocator hAllocator, T* ptr)
    3723 {
    3724  if(ptr != VMA_NULL)
    3725  {
    3726  ptr->~T();
    3727  VmaFree(hAllocator, ptr);
    3728  }
    3729 }
    3730 
    3731 template<typename T>
    3732 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
    3733 {
    3734  if(ptr != VMA_NULL)
    3735  {
    3736  for(size_t i = count; i--; )
    3737  ptr[i].~T();
    3738  VmaFree(hAllocator, ptr);
    3739  }
    3740 }
    3741 
// VmaStringBuilder

#if VMA_STATS_STRING_ENABLED

// Minimal growable character buffer used to build the statistics string
// without std::string. The buffer is not null-terminated by itself;
// consumers use GetLength() together with GetData().
class VmaStringBuilder
{
public:
    VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    size_t GetLength() const { return m_Data.size(); }
    const char* GetData() const { return m_Data.data(); }

    void Add(char ch) { m_Data.push_back(ch); }
    void Add(const char* pStr);
    void AddNewLine() { Add('\n'); }
    void AddNumber(uint32_t num);
    void AddNumber(uint64_t num);
    void AddPointer(const void* ptr);

private:
    VmaVector< char, VmaStlAllocator<char> > m_Data;
};
    3764 
    3765 void VmaStringBuilder::Add(const char* pStr)
    3766 {
    3767  const size_t strLen = strlen(pStr);
    3768  if(strLen > 0)
    3769  {
    3770  const size_t oldCount = m_Data.size();
    3771  m_Data.resize(oldCount + strLen);
    3772  memcpy(m_Data.data() + oldCount, pStr, strLen);
    3773  }
    3774 }
    3775 
// Appends the decimal representation of a 32-bit unsigned number.
// uint32_t has at most 10 decimal digits; +1 for the terminating null.
void VmaStringBuilder::AddNumber(uint32_t num)
{
    char buf[11];
    VmaUint32ToStr(buf, sizeof(buf), num);
    Add(buf);
}
    3782 
// Appends the decimal representation of a 64-bit unsigned number.
// uint64_t has at most 20 decimal digits; +1 for the terminating null.
void VmaStringBuilder::AddNumber(uint64_t num)
{
    char buf[21];
    VmaUint64ToStr(buf, sizeof(buf), num);
    Add(buf);
}
    3789 
// Appends the textual representation of a pointer value.
void VmaStringBuilder::AddPointer(const void* ptr)
{
    char buf[21];
    VmaPtrToStr(buf, sizeof(buf), ptr);
    Add(buf);
}
    3796 
    3797 #endif // #if VMA_STATS_STRING_ENABLED
    3798 
// VmaJsonWriter

#if VMA_STATS_STRING_ENABLED

// Streaming writer of JSON text into a VmaStringBuilder. Keeps a stack of
// currently open objects/arrays so it can emit separators and indentation and
// assert that Begin*/End* calls are properly balanced.
class VmaJsonWriter
{
public:
    VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    ~VmaJsonWriter();

    void BeginObject(bool singleLine = false);
    void EndObject();

    void BeginArray(bool singleLine = false);
    void EndArray();

    // Writes a complete string value in one call.
    void WriteString(const char* pStr);
    // Alternatively, a string value can be built incrementally between
    // BeginString and EndString.
    void BeginString(const char* pStr = VMA_NULL);
    void ContinueString(const char* pStr);
    void ContinueString(uint32_t n);
    void ContinueString(uint64_t n);
    void EndString(const char* pStr = VMA_NULL);

    void WriteNumber(uint32_t n);
    void WriteNumber(uint64_t n);
    void WriteBool(bool b);
    void WriteNull();

private:
    static const char* const INDENT;

    enum COLLECTION_TYPE
    {
        COLLECTION_TYPE_OBJECT,
        COLLECTION_TYPE_ARRAY,
    };
    // One entry per currently open object or array.
    struct StackItem
    {
        COLLECTION_TYPE type;
        // NOTE(review): appears to count values written inside this collection
        // (initialized to 0 in Begin*) - confirm in BeginValue implementation.
        uint32_t valueCount;
        bool singleLineMode;
    };

    VmaStringBuilder& m_SB;
    VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    // True between BeginString and EndString; most operations assert it is false.
    bool m_InsideString;

    void BeginValue(bool isString);
    void WriteIndent(bool oneLess = false);
};
    3850 
    3851 const char* const VmaJsonWriter::INDENT = " ";
    3852 
// Starts with an empty nesting stack and outside of any string value.
VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
    m_SB(sb),
    m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    m_InsideString(false)
{
}
    3859 
    3860 VmaJsonWriter::~VmaJsonWriter()
    3861 {
    3862  VMA_ASSERT(!m_InsideString);
    3863  VMA_ASSERT(m_Stack.empty());
    3864 }
    3865 
    3866 void VmaJsonWriter::BeginObject(bool singleLine)
    3867 {
    3868  VMA_ASSERT(!m_InsideString);
    3869 
    3870  BeginValue(false);
    3871  m_SB.Add('{');
    3872 
    3873  StackItem item;
    3874  item.type = COLLECTION_TYPE_OBJECT;
    3875  item.valueCount = 0;
    3876  item.singleLineMode = singleLine;
    3877  m_Stack.push_back(item);
    3878 }
    3879 
// Closes the innermost collection, which must be an object.
void VmaJsonWriter::EndObject()
{
    VMA_ASSERT(!m_InsideString);

    // oneLess: the closing brace aligns with the line that opened the object.
    WriteIndent(true);
    m_SB.Add('}');

    VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
    m_Stack.pop_back();
}
    3890 
    3891 void VmaJsonWriter::BeginArray(bool singleLine)
    3892 {
    3893  VMA_ASSERT(!m_InsideString);
    3894 
    3895  BeginValue(false);
    3896  m_SB.Add('[');
    3897 
    3898  StackItem item;
    3899  item.type = COLLECTION_TYPE_ARRAY;
    3900  item.valueCount = 0;
    3901  item.singleLineMode = singleLine;
    3902  m_Stack.push_back(item);
    3903 }
    3904 
// Closes the innermost collection, which must be an array.
void VmaJsonWriter::EndArray()
{
    VMA_ASSERT(!m_InsideString);

    // oneLess: the closing bracket aligns with the line that opened the array.
    WriteIndent(true);
    m_SB.Add(']');

    VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
    m_Stack.pop_back();
}
    3915 
    3916 void VmaJsonWriter::WriteString(const char* pStr)
    3917 {
    3918  BeginString(pStr);
    3919  EndString();
    3920 }
    3921 
    3922 void VmaJsonWriter::BeginString(const char* pStr)
    3923 {
    3924  VMA_ASSERT(!m_InsideString);
    3925 
    3926  BeginValue(true);
    3927  m_SB.Add('"');
    3928  m_InsideString = true;
    3929  if(pStr != VMA_NULL && pStr[0] != '\0')
    3930  {
    3931  ContinueString(pStr);
    3932  }
    3933 }
    3934 
    3935 void VmaJsonWriter::ContinueString(const char* pStr)
    3936 {
    3937  VMA_ASSERT(m_InsideString);
    3938 
    3939  const size_t strLen = strlen(pStr);
    3940  for(size_t i = 0; i < strLen; ++i)
    3941  {
    3942  char ch = pStr[i];
    3943  if(ch == '\'')
    3944  {
    3945  m_SB.Add("\\\\");
    3946  }
    3947  else if(ch == '"')
    3948  {
    3949  m_SB.Add("\\\"");
    3950  }
    3951  else if(ch >= 32)
    3952  {
    3953  m_SB.Add(ch);
    3954  }
    3955  else switch(ch)
    3956  {
    3957  case '\n':
    3958  m_SB.Add("\\n");
    3959  break;
    3960  case '\r':
    3961  m_SB.Add("\\r");
    3962  break;
    3963  case '\t':
    3964  m_SB.Add("\\t");
    3965  break;
    3966  default:
    3967  VMA_ASSERT(0 && "Character not currently supported.");
    3968  break;
    3969  }
    3970  }
    3971 }
    3972 
// Appends a decimal number to the string value currently being built.
void VmaJsonWriter::ContinueString(uint32_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}

// Appends a decimal number to the string value currently being built.
void VmaJsonWriter::ContinueString(uint64_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}
    3984 
    3985 void VmaJsonWriter::EndString(const char* pStr)
    3986 {
    3987  VMA_ASSERT(m_InsideString);
    3988  if(pStr != VMA_NULL && pStr[0] != '\0')
    3989  {
    3990  ContinueString(pStr);
    3991  }
    3992  m_SB.Add('"');
    3993  m_InsideString = false;
    3994 }
    3995 
// Writes a complete 32-bit numeric value. Must not appear in object key position.
void VmaJsonWriter::WriteNumber(uint32_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}

// Writes a complete 64-bit numeric value. Must not appear in object key position.
void VmaJsonWriter::WriteNumber(uint64_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}
    4009 
// Writes the literal "true" or "false". Must not appear in object key position.
void VmaJsonWriter::WriteBool(bool b)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.Add(b ? "true" : "false");
}

// Writes the literal "null". Must not appear in object key position.
void VmaJsonWriter::WriteNull()
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.Add("null");
}
    4023 
    4024 void VmaJsonWriter::BeginValue(bool isString)
    4025 {
    4026  if(!m_Stack.empty())
    4027  {
    4028  StackItem& currItem = m_Stack.back();
    4029  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    4030  currItem.valueCount % 2 == 0)
    4031  {
    4032  VMA_ASSERT(isString);
    4033  }
    4034 
    4035  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    4036  currItem.valueCount % 2 != 0)
    4037  {
    4038  m_SB.Add(": ");
    4039  }
    4040  else if(currItem.valueCount > 0)
    4041  {
    4042  m_SB.Add(", ");
    4043  WriteIndent();
    4044  }
    4045  else
    4046  {
    4047  WriteIndent();
    4048  }
    4049  ++currItem.valueCount;
    4050  }
    4051 }
    4052 
    4053 void VmaJsonWriter::WriteIndent(bool oneLess)
    4054 {
    4055  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
    4056  {
    4057  m_SB.AddNewLine();
    4058 
    4059  size_t count = m_Stack.size();
    4060  if(count > 0 && oneLess)
    4061  {
    4062  --count;
    4063  }
    4064  for(size_t i = 0; i < count; ++i)
    4065  {
    4066  m_SB.Add(INDENT);
    4067  }
    4068  }
    4069 }
    4070 
    4071 #endif // #if VMA_STATS_STRING_ENABLED
    4072 
    4074 
    4075 VkDeviceSize VmaAllocation_T::GetOffset() const
    4076 {
    4077  switch(m_Type)
    4078  {
    4079  case ALLOCATION_TYPE_BLOCK:
    4080  return m_BlockAllocation.m_Offset;
    4081  case ALLOCATION_TYPE_DEDICATED:
    4082  return 0;
    4083  default:
    4084  VMA_ASSERT(0);
    4085  return 0;
    4086  }
    4087 }
    4088 
    4089 VkDeviceMemory VmaAllocation_T::GetMemory() const
    4090 {
    4091  switch(m_Type)
    4092  {
    4093  case ALLOCATION_TYPE_BLOCK:
    4094  return m_BlockAllocation.m_Block->m_hMemory;
    4095  case ALLOCATION_TYPE_DEDICATED:
    4096  return m_DedicatedAllocation.m_hMemory;
    4097  default:
    4098  VMA_ASSERT(0);
    4099  return VK_NULL_HANDLE;
    4100  }
    4101 }
    4102 
    4103 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
    4104 {
    4105  switch(m_Type)
    4106  {
    4107  case ALLOCATION_TYPE_BLOCK:
    4108  return m_BlockAllocation.m_Block->m_MemoryTypeIndex;
    4109  case ALLOCATION_TYPE_DEDICATED:
    4110  return m_DedicatedAllocation.m_MemoryTypeIndex;
    4111  default:
    4112  VMA_ASSERT(0);
    4113  return UINT32_MAX;
    4114  }
    4115 }
    4116 
    4117 VMA_BLOCK_VECTOR_TYPE VmaAllocation_T::GetBlockVectorType() const
    4118 {
    4119  switch(m_Type)
    4120  {
    4121  case ALLOCATION_TYPE_BLOCK:
    4122  return m_BlockAllocation.m_Block->m_BlockVectorType;
    4123  case ALLOCATION_TYPE_DEDICATED:
    4124  return (m_DedicatedAllocation.m_PersistentMap ? VMA_BLOCK_VECTOR_TYPE_MAPPED : VMA_BLOCK_VECTOR_TYPE_UNMAPPED);
    4125  default:
    4126  VMA_ASSERT(0);
    4127  return VMA_BLOCK_VECTOR_TYPE_COUNT;
    4128  }
    4129 }
    4130 
    4131 void* VmaAllocation_T::GetMappedData() const
    4132 {
    4133  switch(m_Type)
    4134  {
    4135  case ALLOCATION_TYPE_BLOCK:
    4136  if(m_BlockAllocation.m_Block->m_pMappedData != VMA_NULL)
    4137  {
    4138  return (char*)m_BlockAllocation.m_Block->m_pMappedData + m_BlockAllocation.m_Offset;
    4139  }
    4140  else
    4141  {
    4142  return VMA_NULL;
    4143  }
    4144  break;
    4145  case ALLOCATION_TYPE_DEDICATED:
    4146  return m_DedicatedAllocation.m_pMappedData;
    4147  default:
    4148  VMA_ASSERT(0);
    4149  return VMA_NULL;
    4150  }
    4151 }
    4152 
    4153 bool VmaAllocation_T::CanBecomeLost() const
    4154 {
    4155  switch(m_Type)
    4156  {
    4157  case ALLOCATION_TYPE_BLOCK:
    4158  return m_BlockAllocation.m_CanBecomeLost;
    4159  case ALLOCATION_TYPE_DEDICATED:
    4160  return false;
    4161  default:
    4162  VMA_ASSERT(0);
    4163  return false;
    4164  }
    4165 }
    4166 
// Returns the custom pool this allocation was made from.
// Valid only for block allocations; dedicated allocations never belong to a pool.
VmaPool VmaAllocation_T::GetPool() const
{
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    return m_BlockAllocation.m_hPool;
}
    4172 
    4173 VkResult VmaAllocation_T::DedicatedAllocMapPersistentlyMappedMemory(VmaAllocator hAllocator)
    4174 {
    4175  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
    4176  if(m_DedicatedAllocation.m_PersistentMap)
    4177  {
    4178  return (*hAllocator->GetVulkanFunctions().vkMapMemory)(
    4179  hAllocator->m_hDevice,
    4180  m_DedicatedAllocation.m_hMemory,
    4181  0,
    4182  VK_WHOLE_SIZE,
    4183  0,
    4184  &m_DedicatedAllocation.m_pMappedData);
    4185  }
    4186  return VK_SUCCESS;
    4187 }
    4188 void VmaAllocation_T::DedicatedAllocUnmapPersistentlyMappedMemory(VmaAllocator hAllocator)
    4189 {
    4190  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
    4191  if(m_DedicatedAllocation.m_pMappedData)
    4192  {
    4193  VMA_ASSERT(m_DedicatedAllocation.m_PersistentMap);
    4194  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_DedicatedAllocation.m_hMemory);
    4195  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
    4196  }
    4197 }
    4198 
    4199 
/*
Tries to atomically transition this allocation to the "lost" state.
Returns true on success. Returns false when the allocation was used too
recently (within frameInUseCount frames of currentFrameIndex) and so must
not be evicted yet.
*/
bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    VMA_ASSERT(CanBecomeLost());

    /*
    Warning: This is a carefully designed algorithm.
    Do not modify unless you really know what you're doing :)
    */
    uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
    // Compare-exchange retry loop over the atomic last-use frame index.
    for(;;)
    {
        if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
        {
            // Already lost - caller should not have called MakeLost again.
            VMA_ASSERT(0);
            return false;
        }
        else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
        {
            // Potentially still in use by the GPU: cannot be made lost.
            return false;
        }
        else // Last use time earlier than current time.
        {
            // NOTE(review): on failure this presumably reloads
            // localLastUseFrameIndex (CAS semantics), making the loop retry.
            if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
            {
                // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
                // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
                return true;
            }
        }
    }
}
    4231 
    4232 #if VMA_STATS_STRING_ENABLED
    4233 
// Printable names used in JSON dumps. Order must correspond to the values of
// enum VmaSuballocationType, which indexes this array.
static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    "FREE",
    "UNKNOWN",
    "BUFFER",
    "IMAGE_UNKNOWN",
    "IMAGE_LINEAR",
    "IMAGE_OPTIMAL",
};
    4243 
    4244 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
    4245 {
    4246  json.BeginObject();
    4247 
    4248  json.WriteString("Blocks");
    4249  json.WriteNumber(stat.blockCount);
    4250 
    4251  json.WriteString("Allocations");
    4252  json.WriteNumber(stat.allocationCount);
    4253 
    4254  json.WriteString("UnusedRanges");
    4255  json.WriteNumber(stat.unusedRangeCount);
    4256 
    4257  json.WriteString("UsedBytes");
    4258  json.WriteNumber(stat.usedBytes);
    4259 
    4260  json.WriteString("UnusedBytes");
    4261  json.WriteNumber(stat.unusedBytes);
    4262 
    4263  if(stat.allocationCount > 1)
    4264  {
    4265  json.WriteString("AllocationSize");
    4266  json.BeginObject(true);
    4267  json.WriteString("Min");
    4268  json.WriteNumber(stat.allocationSizeMin);
    4269  json.WriteString("Avg");
    4270  json.WriteNumber(stat.allocationSizeAvg);
    4271  json.WriteString("Max");
    4272  json.WriteNumber(stat.allocationSizeMax);
    4273  json.EndObject();
    4274  }
    4275 
    4276  if(stat.unusedRangeCount > 1)
    4277  {
    4278  json.WriteString("UnusedRangeSize");
    4279  json.BeginObject(true);
    4280  json.WriteString("Min");
    4281  json.WriteNumber(stat.unusedRangeSizeMin);
    4282  json.WriteString("Avg");
    4283  json.WriteNumber(stat.unusedRangeSizeAvg);
    4284  json.WriteString("Max");
    4285  json.WriteNumber(stat.unusedRangeSizeMax);
    4286  json.EndObject();
    4287  }
    4288 
    4289  json.EndObject();
    4290 }
    4291 
    4292 #endif // #if VMA_STATS_STRING_ENABLED
    4293 
// Comparator ordering suballocation-list iterators by ascending suballocation
// size. The heterogeneous overload (iterator vs. VkDeviceSize) enables binary
// search for the first free suballocation of at least a given size.
struct VmaSuballocationItemSizeLess
{
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        const VmaSuballocationList::iterator rhs) const
    {
        return lhs->size < rhs->size;
    }
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        VkDeviceSize rhsSize) const
    {
        return lhs->size < rhsSize;
    }
};
    4309 
    4311 // class VmaBlockMetadata
    4312 
// Constructs empty metadata; Init() must be called before use.
// hAllocator supplies the allocation callbacks for the internal containers.
VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
    m_Size(0),
    m_FreeCount(0),
    m_SumFreeSize(0),
    m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
{
}

VmaBlockMetadata::~VmaBlockMetadata()
{
}
    4325 
    4326 void VmaBlockMetadata::Init(VkDeviceSize size)
    4327 {
    4328  m_Size = size;
    4329  m_FreeCount = 1;
    4330  m_SumFreeSize = size;
    4331 
    4332  VmaSuballocation suballoc = {};
    4333  suballoc.offset = 0;
    4334  suballoc.size = size;
    4335  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    4336  suballoc.hAllocation = VK_NULL_HANDLE;
    4337 
    4338  m_Suballocations.push_back(suballoc);
    4339  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
    4340  --suballocItem;
    4341  m_FreeSuballocationsBySize.push_back(suballocItem);
    4342 }
    4343 
/*
Checks internal consistency of this block's metadata; returns false on the
first violated invariant. Invariants checked:
- Suballocations are contiguous (offsets add up) and cover exactly m_Size bytes.
- No two adjacent suballocations are free (they should have been merged).
- Free suballocations have null hAllocation; used ones have a non-null one.
- m_FreeSuballocationsBySize holds exactly the free suballocations of size
  >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER, sorted ascending by size.
- m_FreeCount and m_SumFreeSize match the list contents.
*/
bool VmaBlockMetadata::Validate() const
{
    if(m_Suballocations.empty())
    {
        return false;
    }

    // Expected offset of new suballocation as calculated from previous ones.
    VkDeviceSize calculatedOffset = 0;
    // Expected number of free suballocations as calculated from traversing their list.
    uint32_t calculatedFreeCount = 0;
    // Expected sum size of free suballocations as calculated from traversing their list.
    VkDeviceSize calculatedSumFreeSize = 0;
    // Expected number of free suballocations that should be registered in
    // m_FreeSuballocationsBySize calculated from traversing their list.
    size_t freeSuballocationsToRegister = 0;
    // True if previous visited suballocation was free.
    bool prevFree = false;

    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem)
    {
        const VmaSuballocation& subAlloc = *suballocItem;

        // Actual offset of this suballocation doesn't match expected one.
        if(subAlloc.offset != calculatedOffset)
        {
            return false;
        }

        const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
        // Two adjacent free suballocations are invalid. They should be merged.
        if(prevFree && currFree)
        {
            return false;
        }
        prevFree = currFree;

        // A suballocation is free if and only if it has no allocation handle.
        if(currFree != (subAlloc.hAllocation == VK_NULL_HANDLE))
        {
            return false;
        }

        if(currFree)
        {
            calculatedSumFreeSize += subAlloc.size;
            ++calculatedFreeCount;
            // Only sufficiently large free ranges are registered for allocation search.
            if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
            {
                ++freeSuballocationsToRegister;
            }
        }

        calculatedOffset += subAlloc.size;
    }

    // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    // match expected one.
    if(m_FreeSuballocationsBySize.size() != freeSuballocationsToRegister)
    {
        return false;
    }

    VkDeviceSize lastSize = 0;
    for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    {
        VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];

        // Only free suballocations can be registered in m_FreeSuballocationsBySize.
        if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            return false;
        }
        // They must be sorted by size ascending.
        if(suballocItem->size < lastSize)
        {
            return false;
        }

        lastSize = suballocItem->size;
    }

    // Check if totals match calculated values.
    return
        ValidateFreeSuballocationList() &&
        (calculatedOffset == m_Size) &&
        (calculatedSumFreeSize == m_SumFreeSize) &&
        (calculatedFreeCount == m_FreeCount);
}
    4434 
    4435 VkDeviceSize VmaBlockMetadata::GetUnusedRangeSizeMax() const
    4436 {
    4437  if(!m_FreeSuballocationsBySize.empty())
    4438  {
    4439  return m_FreeSuballocationsBySize.back()->size;
    4440  }
    4441  else
    4442  {
    4443  return 0;
    4444  }
    4445 }
    4446 
    4447 bool VmaBlockMetadata::IsEmpty() const
    4448 {
    4449  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
    4450 }
    4451 
    4452 void VmaBlockMetadata::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    4453 {
    4454  outInfo.blockCount = 1;
    4455 
    4456  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    4457  outInfo.allocationCount = rangeCount - m_FreeCount;
    4458  outInfo.unusedRangeCount = m_FreeCount;
    4459 
    4460  outInfo.unusedBytes = m_SumFreeSize;
    4461  outInfo.usedBytes = m_Size - outInfo.unusedBytes;
    4462 
    4463  outInfo.allocationSizeMin = UINT64_MAX;
    4464  outInfo.allocationSizeMax = 0;
    4465  outInfo.unusedRangeSizeMin = UINT64_MAX;
    4466  outInfo.unusedRangeSizeMax = 0;
    4467 
    4468  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    4469  suballocItem != m_Suballocations.cend();
    4470  ++suballocItem)
    4471  {
    4472  const VmaSuballocation& suballoc = *suballocItem;
    4473  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    4474  {
    4475  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    4476  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
    4477  }
    4478  else
    4479  {
    4480  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
    4481  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
    4482  }
    4483  }
    4484 }
    4485 
    4486 void VmaBlockMetadata::AddPoolStats(VmaPoolStats& inoutStats) const
    4487 {
    4488  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    4489 
    4490  inoutStats.size += m_Size;
    4491  inoutStats.unusedSize += m_SumFreeSize;
    4492  inoutStats.allocationCount += rangeCount - m_FreeCount;
    4493  inoutStats.unusedRangeCount += m_FreeCount;
    4494  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    4495 }
    4496 
    4497 #if VMA_STATS_STRING_ENABLED
    4498 
    4499 void VmaBlockMetadata::PrintDetailedMap(class VmaJsonWriter& json) const
    4500 {
    4501  json.BeginObject();
    4502 
    4503  json.WriteString("TotalBytes");
    4504  json.WriteNumber(m_Size);
    4505 
    4506  json.WriteString("UnusedBytes");
    4507  json.WriteNumber(m_SumFreeSize);
    4508 
    4509  json.WriteString("Allocations");
    4510  json.WriteNumber(m_Suballocations.size() - m_FreeCount);
    4511 
    4512  json.WriteString("UnusedRanges");
    4513  json.WriteNumber(m_FreeCount);
    4514 
    4515  json.WriteString("Suballocations");
    4516  json.BeginArray();
    4517  size_t i = 0;
    4518  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    4519  suballocItem != m_Suballocations.cend();
    4520  ++suballocItem, ++i)
    4521  {
    4522  json.BeginObject(true);
    4523 
    4524  json.WriteString("Type");
    4525  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[suballocItem->type]);
    4526 
    4527  json.WriteString("Size");
    4528  json.WriteNumber(suballocItem->size);
    4529 
    4530  json.WriteString("Offset");
    4531  json.WriteNumber(suballocItem->offset);
    4532 
    4533  json.EndObject();
    4534  }
    4535  json.EndArray();
    4536 
    4537  json.EndObject();
    4538 }
    4539 
    4540 #endif // #if VMA_STATS_STRING_ENABLED
    4541 
    4542 /*
    4543 How many suitable free suballocations to analyze before choosing best one.
    4544 - Set to 1 to use First-Fit algorithm - first suitable free suballocation will
    4545  be chosen.
    4546 - Set to UINT32_MAX to use Best-Fit/Worst-Fit algorithm - all suitable free
 suballocations will be analyzed and best one will be chosen.
    4548 - Any other value is also acceptable.
    4549 */
    4550 //static const uint32_t MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK = 8;
    4551 
// Fills pAllocationRequest for allocating at offset 0 of an empty block.
// Valid only when the block contains a single, all-covering free
// suballocation (IsEmpty() - asserted).
void VmaBlockMetadata::CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(IsEmpty());
    pAllocationRequest->offset = 0;
    pAllocationRequest->sumFreeSize = m_SumFreeSize;
    pAllocationRequest->sumItemSize = 0;
    pAllocationRequest->item = m_Suballocations.begin();
    pAllocationRequest->itemsToMakeLostCount = 0;
}
    4561 
/*
Tries to find a place for a new allocation of allocSize bytes with
allocAlignment in this block.

Phase 1 searches m_FreeSuballocationsBySize: best-fit via binary search or
worst-fit from the largest range, depending on VMA_BEST_FIT. Phase 2, only
when canMakeOtherLost is true, brute-force scans all suballocations and
considers sacrificing existing lost-enabled allocations, keeping the
candidate with the lowest cost (VmaAllocationRequest::CalcCost).

Returns true and fills *pAllocationRequest on success. The request is only
a plan - it is committed later via MakeRequestedAllocationsLost / Alloc.
*/
bool VmaBlockMetadata::CreateAllocationRequest(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(pAllocationRequest != VMA_NULL);
    VMA_HEAVY_ASSERT(Validate());

    // There is not enough total free space in this block to fulfill the request: Early return.
    if(canMakeOtherLost == false && m_SumFreeSize < allocSize)
    {
        return false;
    }

    // New algorithm, efficiently searching freeSuballocationsBySize.
    const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
    if(freeSuballocCount > 0)
    {
        if(VMA_BEST_FIT)
        {
            // Find first free suballocation with size not less than allocSize.
            VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
                m_FreeSuballocationsBySize.data(),
                m_FreeSuballocationsBySize.data() + freeSuballocCount,
                allocSize,
                VmaSuballocationItemSizeLess());
            size_t index = it - m_FreeSuballocationsBySize.data();
            for(; index < freeSuballocCount; ++index)
            {
                // A large-enough range can still fail CheckAllocation due to
                // alignment or bufferImageGranularity, hence the loop.
                if(CheckAllocation(
                    currentFrameIndex,
                    frameInUseCount,
                    bufferImageGranularity,
                    allocSize,
                    allocAlignment,
                    allocType,
                    m_FreeSuballocationsBySize[index],
                    false, // canMakeOtherLost
                    &pAllocationRequest->offset,
                    &pAllocationRequest->itemsToMakeLostCount,
                    &pAllocationRequest->sumFreeSize,
                    &pAllocationRequest->sumItemSize))
                {
                    pAllocationRequest->item = m_FreeSuballocationsBySize[index];
                    return true;
                }
            }
        }
        else
        {
            // Search starting from biggest suballocations.
            for(size_t index = freeSuballocCount; index--; )
            {
                if(CheckAllocation(
                    currentFrameIndex,
                    frameInUseCount,
                    bufferImageGranularity,
                    allocSize,
                    allocAlignment,
                    allocType,
                    m_FreeSuballocationsBySize[index],
                    false, // canMakeOtherLost
                    &pAllocationRequest->offset,
                    &pAllocationRequest->itemsToMakeLostCount,
                    &pAllocationRequest->sumFreeSize,
                    &pAllocationRequest->sumItemSize))
                {
                    pAllocationRequest->item = m_FreeSuballocationsBySize[index];
                    return true;
                }
            }
        }
    }

    if(canMakeOtherLost)
    {
        // Brute-force algorithm. TODO: Come up with something better.

        // VK_WHOLE_SIZE serves as the "no candidate found yet" sentinel for cost comparison.
        pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
        pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;

        VmaAllocationRequest tmpAllocRequest = {};
        for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
            suballocIt != m_Suballocations.end();
            ++suballocIt)
        {
            // Consider starting at any suballocation that is free or could be made lost.
            if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
                suballocIt->hAllocation->CanBecomeLost())
            {
                if(CheckAllocation(
                    currentFrameIndex,
                    frameInUseCount,
                    bufferImageGranularity,
                    allocSize,
                    allocAlignment,
                    allocType,
                    suballocIt,
                    canMakeOtherLost,
                    &tmpAllocRequest.offset,
                    &tmpAllocRequest.itemsToMakeLostCount,
                    &tmpAllocRequest.sumFreeSize,
                    &tmpAllocRequest.sumItemSize))
                {
                    tmpAllocRequest.item = suballocIt;

                    // Keep the candidate with the lowest cost.
                    if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
                    {
                        *pAllocationRequest = tmpAllocRequest;
                    }
                }
            }
        }

        if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
        {
            return true;
        }
    }

    return false;
}
    4690 
/*
Commits the "make lost" part of an allocation request: marks as lost the
itemsToMakeLostCount allocations starting at pAllocationRequest->item.
Returns false if any of them can no longer be made lost (e.g. it was used
again meanwhile); the request must then be abandoned. On success,
pAllocationRequest->item points at the resulting free suballocation.
*/
bool VmaBlockMetadata::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    while(pAllocationRequest->itemsToMakeLostCount > 0)
    {
        // Skip over a free suballocation to reach the next used one.
        if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            ++pAllocationRequest->item;
        }
        VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
        VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
        VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
        if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            // FreeSuballocation may merge adjacent free ranges and returns an
            // iterator to the (possibly merged) free suballocation.
            pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
            --pAllocationRequest->itemsToMakeLostCount;
        }
        else
        {
            return false;
        }
    }

    VMA_HEAVY_ASSERT(Validate());
    VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);

    return true;
}
    4722 
// Makes lost every allocation in this block that is allowed to become lost
// and is old enough (per currentFrameIndex / frameInUseCount).
// Returns the number of allocations actually made lost.
uint32_t VmaBlockMetadata::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    uint32_t lostAllocationCount = 0;
    for(VmaSuballocationList::iterator it = m_Suballocations.begin();
        it != m_Suballocations.end();
        ++it)
    {
        if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
            it->hAllocation->CanBecomeLost() &&
            it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            // FreeSuballocation may merge neighbors; continue iterating from
            // the returned (merged) element.
            it = FreeSuballocation(it);
            ++lostAllocationCount;
        }
    }
    return lostAllocationCount;
}
    4740 
/*
Commits an allocation previously planned by CreateAllocationRequest or
CreateFirstAllocationRequest: converts the free suballocation at
request.item into a used suballocation of exactly allocSize bytes at
request.offset, inserting new free suballocations for any leftover space
before and/or after it, and updates m_FreeCount / m_SumFreeSize.
*/
void VmaBlockMetadata::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    VmaAllocation hAllocation)
{
    VMA_ASSERT(request.item != m_Suballocations.end());
    VmaSuballocation& suballoc = *request.item;
    // Given suballocation is a free block.
    VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    // Given offset is inside this suballocation.
    VMA_ASSERT(request.offset >= suballoc.offset);
    const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;

    // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    // it to become used.
    UnregisterFreeSuballocation(request.item);

    suballoc.offset = request.offset;
    suballoc.size = allocSize;
    suballoc.type = type;
    suballoc.hAllocation = hAllocation;

    // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    if(paddingEnd)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset + allocSize;
        paddingSuballoc.size = paddingEnd;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        VmaSuballocationList::iterator next = request.item;
        ++next;
        const VmaSuballocationList::iterator paddingEndItem =
            m_Suballocations.insert(next, paddingSuballoc);
        RegisterFreeSuballocation(paddingEndItem);
    }

    // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    if(paddingBegin)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset - paddingBegin;
        paddingSuballoc.size = paddingBegin;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        const VmaSuballocationList::iterator paddingBeginItem =
            m_Suballocations.insert(request.item, paddingSuballoc);
        RegisterFreeSuballocation(paddingBeginItem);
    }

    // Update totals: one free range was consumed; each nonzero padding adds one back.
    m_FreeCount = m_FreeCount - 1;
    if(paddingBegin > 0)
    {
        ++m_FreeCount;
    }
    if(paddingEnd > 0)
    {
        ++m_FreeCount;
    }
    m_SumFreeSize -= allocSize;
}
    4804 
    4805 void VmaBlockMetadata::Free(const VmaAllocation allocation)
    4806 {
    4807  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    4808  suballocItem != m_Suballocations.end();
    4809  ++suballocItem)
    4810  {
    4811  VmaSuballocation& suballoc = *suballocItem;
    4812  if(suballoc.hAllocation == allocation)
    4813  {
    4814  FreeSuballocation(suballocItem);
    4815  VMA_HEAVY_ASSERT(Validate());
    4816  return;
    4817  }
    4818  }
    4819  VMA_ASSERT(0 && "Not found!");
    4820 }
    4821 
    4822 bool VmaBlockMetadata::ValidateFreeSuballocationList() const
    4823 {
    4824  VkDeviceSize lastSize = 0;
    4825  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
    4826  {
    4827  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
    4828 
    4829  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
    4830  {
    4831  VMA_ASSERT(0);
    4832  return false;
    4833  }
    4834  if(it->size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    4835  {
    4836  VMA_ASSERT(0);
    4837  return false;
    4838  }
    4839  if(it->size < lastSize)
    4840  {
    4841  VMA_ASSERT(0);
    4842  return false;
    4843  }
    4844 
    4845  lastSize = it->size;
    4846  }
    4847  return true;
    4848 }
    4849 
/*
Checks whether an allocation of allocSize bytes with allocAlignment and
allocType can be placed starting at the suballocation pointed to by
suballocItem, and if so, computes its final offset.

Parameters:
- currentFrameIndex / frameInUseCount: used to decide whether existing
  allocations may be made lost (an allocation can be sacrificed only when its
  last-use frame plus frameInUseCount is older than the current frame).
- bufferImageGranularity: if > 1, neighbor suballocations of conflicting type
  (buffer vs. image) must not share a page of this size.
- suballocItem: candidate starting suballocation.
- canMakeOtherLost: when true, non-free suballocations may be consumed by
  marking their allocations lost; when false, suballocItem must be FREE.
- pOffset (out): final offset of the prospective allocation.
- itemsToMakeLostCount (out): number of allocations that would have to be
  made lost.
- pSumFreeSize / pSumItemSize (out): sizes of free space and of to-be-lost
  allocations that the request would consume (used for cost calculation).

Returns true and fills *pOffset on success; false when the allocation cannot
start at this suballocation.
*/
bool VmaBlockMetadata::CheckAllocation(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    VmaSuballocationList::const_iterator suballocItem,
    bool canMakeOtherLost,
    VkDeviceSize* pOffset,
    size_t* itemsToMakeLostCount,
    VkDeviceSize* pSumFreeSize,
    VkDeviceSize* pSumItemSize) const
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(suballocItem != m_Suballocations.cend());
    VMA_ASSERT(pOffset != VMA_NULL);

    // Reset outputs before any early return.
    *itemsToMakeLostCount = 0;
    *pSumFreeSize = 0;
    *pSumItemSize = 0;

    if(canMakeOtherLost)
    {
        // The starting suballocation may be free or occupied; if occupied,
        // it must be possible to make its allocation lost.
        if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            *pSumFreeSize = suballocItem->size;
        }
        else
        {
            if(suballocItem->hAllocation->CanBecomeLost() &&
                suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
            {
                ++*itemsToMakeLostCount;
                *pSumItemSize = suballocItem->size;
            }
            else
            {
                return false;
            }
        }

        // Remaining size is too small for this request: Early return.
        if(m_Size - suballocItem->offset < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballocItem->offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
        *pOffset = VmaAlignUp(*pOffset, alignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                // Bump offset to the next granularity page to avoid the conflict.
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Now that we have final *pOffset, check if we are past suballocItem.
        // If yes, return false - this function should be called for another suballocItem as starting point.
        if(*pOffset >= suballocItem->offset + suballocItem->size)
        {
            return false;
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;

        // Calculate required margin at the end if this is not last suballocation.
        VmaSuballocationList::const_iterator next = suballocItem;
        ++next;
        const VkDeviceSize requiredEndMargin =
            (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;

        const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
        // Another early return check.
        if(suballocItem->offset + totalSize > m_Size)
        {
            return false;
        }

        // Advance lastSuballocItem until desired size is reached.
        // Update itemsToMakeLostCount.
        VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
        if(totalSize > suballocItem->size)
        {
            VkDeviceSize remainingSize = totalSize - suballocItem->size;
            while(remainingSize > 0)
            {
                ++lastSuballocItem;
                if(lastSuballocItem == m_Suballocations.cend())
                {
                    return false;
                }
                if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                {
                    *pSumFreeSize += lastSuballocItem->size;
                }
                else
                {
                    // Occupied suballocation: it must be possible to make it lost,
                    // otherwise the request cannot fit here.
                    VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
                    if(lastSuballocItem->hAllocation->CanBecomeLost() &&
                        lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                    {
                        ++*itemsToMakeLostCount;
                        *pSumItemSize += lastSuballocItem->size;
                    }
                    else
                    {
                        return false;
                    }
                }
                remainingSize = (lastSuballocItem->size < remainingSize) ?
                    remainingSize - lastSuballocItem->size : 0;
            }
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, we must mark more allocations lost or fail.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
                        if(nextSuballoc.hAllocation->CanBecomeLost() &&
                            nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++*itemsToMakeLostCount;
                        }
                        else
                        {
                            return false;
                        }
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }
    else
    {
        // canMakeOtherLost == false: the starting suballocation must be FREE
        // and the whole request must fit inside it.
        const VmaSuballocation& suballoc = *suballocItem;
        VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        *pSumFreeSize = suballoc.size;

        // Size of this suballocation is too small for this request: Early return.
        if(suballoc.size < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballoc.offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
        *pOffset = VmaAlignUp(*pOffset, alignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                // Bump offset to the next granularity page to avoid the conflict.
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;

        // Calculate required margin at the end if this is not last suballocation.
        VmaSuballocationList::const_iterator next = suballocItem;
        ++next;
        const VkDeviceSize requiredEndMargin =
            (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;

        // Fail if requested size plus margin before and after is bigger than size of this suballocation.
        if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
        {
            return false;
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, allocation cannot be made here.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        return false;
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }

    // All tests passed: Success. pOffset is already filled.
    return true;
}
    5131 
    5132 void VmaBlockMetadata::MergeFreeWithNext(VmaSuballocationList::iterator item)
    5133 {
    5134  VMA_ASSERT(item != m_Suballocations.end());
    5135  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    5136 
    5137  VmaSuballocationList::iterator nextItem = item;
    5138  ++nextItem;
    5139  VMA_ASSERT(nextItem != m_Suballocations.end());
    5140  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
    5141 
    5142  item->size += nextItem->size;
    5143  --m_FreeCount;
    5144  m_Suballocations.erase(nextItem);
    5145 }
    5146 
/*
Marks the given suballocation as free, updates totals, and coalesces it with
its free neighbors (previous and/or next) if possible.

Neighbors are unregistered from m_FreeSuballocationsBySize before merging,
because merging changes their size, which is the sort key of that structure.
Returns the iterator of the resulting (possibly merged) free suballocation,
which has been re-registered.
*/
VmaSuballocationList::iterator VmaBlockMetadata::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
{
    // Change this suballocation to be marked as free.
    VmaSuballocation& suballoc = *suballocItem;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    suballoc.hAllocation = VK_NULL_HANDLE;

    // Update totals.
    ++m_FreeCount;
    m_SumFreeSize += suballoc.size;

    // Merge with previous and/or next suballocation if it's also free.
    bool mergeWithNext = false;
    bool mergeWithPrev = false;

    VmaSuballocationList::iterator nextItem = suballocItem;
    ++nextItem;
    if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    {
        mergeWithNext = true;
    }

    VmaSuballocationList::iterator prevItem = suballocItem;
    if(suballocItem != m_Suballocations.begin())
    {
        --prevItem;
        if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            mergeWithPrev = true;
        }
    }

    if(mergeWithNext)
    {
        // nextItem was registered with its old size; remove it before it is erased.
        UnregisterFreeSuballocation(nextItem);
        MergeFreeWithNext(suballocItem);
    }

    if(mergeWithPrev)
    {
        // prevItem's size changes, so re-register it after the merge.
        UnregisterFreeSuballocation(prevItem);
        MergeFreeWithNext(prevItem);
        RegisterFreeSuballocation(prevItem);
        return prevItem;
    }
    else
    {
        RegisterFreeSuballocation(suballocItem);
        return suballocItem;
    }
}
    5198 
    5199 void VmaBlockMetadata::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
    5200 {
    5201  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    5202  VMA_ASSERT(item->size > 0);
    5203 
    5204  // You may want to enable this validation at the beginning or at the end of
    5205  // this function, depending on what do you want to check.
    5206  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    5207 
    5208  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    5209  {
    5210  if(m_FreeSuballocationsBySize.empty())
    5211  {
    5212  m_FreeSuballocationsBySize.push_back(item);
    5213  }
    5214  else
    5215  {
    5216  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
    5217  }
    5218  }
    5219 
    5220  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    5221 }
    5222 
    5223 
// Removes 'item' from m_FreeSuballocationsBySize. Counterpart of
// RegisterFreeSuballocation(). Items smaller than
// VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER were never registered, so for
// them this is a no-op.
void VmaBlockMetadata::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
{
    VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(item->size > 0);

    // You may want to enable this validation at the beginning or at the end of
    // this function, depending on what do you want to check.
    VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());

    if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    {
        // Binary search finds the first entry with size not less than
        // item->size; several entries may share that size, so scan forward
        // through the run of equal sizes for the exact iterator.
        VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
            m_FreeSuballocationsBySize.data(),
            m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
            item,
            VmaSuballocationItemSizeLess());
        for(size_t index = it - m_FreeSuballocationsBySize.data();
            index < m_FreeSuballocationsBySize.size();
            ++index)
        {
            if(m_FreeSuballocationsBySize[index] == item)
            {
                VmaVectorRemove(m_FreeSuballocationsBySize, index);
                return;
            }
            // Leaving the run of equal sizes without a match means the item is missing.
            VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
        }
        VMA_ASSERT(0 && "Not found.");
    }

    //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
}
    5256 
    5258 // class VmaDeviceMemoryBlock
    5259 
// Constructs the block in an empty, not-yet-initialized state; actual device
// memory and properties are attached later via Init(). m_BlockVectorType is
// set to the out-of-range sentinel VMA_BLOCK_VECTOR_TYPE_COUNT until then.
VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    m_MemoryTypeIndex(UINT32_MAX),
    m_BlockVectorType(VMA_BLOCK_VECTOR_TYPE_COUNT),
    m_hMemory(VK_NULL_HANDLE),
    m_PersistentMap(false),
    m_pMappedData(VMA_NULL),
    m_Metadata(hAllocator)
{
}
    5269 
    5270 void VmaDeviceMemoryBlock::Init(
    5271  uint32_t newMemoryTypeIndex,
    5272  VMA_BLOCK_VECTOR_TYPE newBlockVectorType,
    5273  VkDeviceMemory newMemory,
    5274  VkDeviceSize newSize,
    5275  bool persistentMap,
    5276  void* pMappedData)
    5277 {
    5278  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    5279 
    5280  m_MemoryTypeIndex = newMemoryTypeIndex;
    5281  m_BlockVectorType = newBlockVectorType;
    5282  m_hMemory = newMemory;
    5283  m_PersistentMap = persistentMap;
    5284  m_pMappedData = pMappedData;
    5285 
    5286  m_Metadata.Init(newSize);
    5287 }
    5288 
// Releases the resources owned by this block: unmaps the memory if it was
// mapped, then returns the VkDeviceMemory to the allocator. The block must be
// empty (all allocations freed) before this is called.
void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
{
    // This is the most important assert in the entire library.
    // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
    VMA_ASSERT(m_Metadata.IsEmpty() && "Some allocations were not freed before destruction of this memory block!");

    VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    // Unmap before freeing the underlying device memory.
    if(m_pMappedData != VMA_NULL)
    {
        (allocator->GetVulkanFunctions().vkUnmapMemory)(allocator->m_hDevice, m_hMemory);
        m_pMappedData = VMA_NULL;
    }

    allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_Metadata.GetSize(), m_hMemory);
    m_hMemory = VK_NULL_HANDLE;
}
    5305 
    5306 bool VmaDeviceMemoryBlock::Validate() const
    5307 {
    5308  if((m_hMemory == VK_NULL_HANDLE) ||
    5309  (m_Metadata.GetSize() == 0))
    5310  {
    5311  return false;
    5312  }
    5313 
    5314  return m_Metadata.Validate();
    5315 }
    5316 
    5317 static void InitStatInfo(VmaStatInfo& outInfo)
    5318 {
    5319  memset(&outInfo, 0, sizeof(outInfo));
    5320  outInfo.allocationSizeMin = UINT64_MAX;
    5321  outInfo.unusedRangeSizeMin = UINT64_MAX;
    5322 }
    5323 
    5324 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
    5325 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
    5326 {
    5327  inoutInfo.blockCount += srcInfo.blockCount;
    5328  inoutInfo.allocationCount += srcInfo.allocationCount;
    5329  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
    5330  inoutInfo.usedBytes += srcInfo.usedBytes;
    5331  inoutInfo.unusedBytes += srcInfo.unusedBytes;
    5332  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
    5333  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
    5334  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
    5335  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
    5336 }
    5337 
    5338 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
    5339 {
    5340  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
    5341  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
    5342  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
    5343  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
    5344 }
    5345 
// Constructs a custom pool by translating VmaPoolCreateInfo fields into the
// parameters of the underlying VmaBlockVector.
VmaPool_T::VmaPool_T(
    VmaAllocator hAllocator,
    const VmaPoolCreateInfo& createInfo) :
    m_BlockVector(
        hAllocator,
        createInfo.memoryTypeIndex,
        // Persistent-map pools use the mapped block vector type.
        (createInfo.flags & VMA_POOL_CREATE_PERSISTENT_MAP_BIT) != 0 ?
            VMA_BLOCK_VECTOR_TYPE_MAPPED : VMA_BLOCK_VECTOR_TYPE_UNMAPPED,
        createInfo.blockSize,
        createInfo.minBlockCount,
        createInfo.maxBlockCount,
        // Granularity of 1 effectively disables buffer/image granularity handling.
        (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
        createInfo.frameInUseCount,
        true) // isCustomPool
{
}
    5362 
// Nothing to do explicitly: the m_BlockVector member releases its blocks in
// its own destructor.
VmaPool_T::~VmaPool_T()
{
}
    5366 
    5367 #if VMA_STATS_STRING_ENABLED
    5368 
    5369 #endif // #if VMA_STATS_STRING_ENABLED
    5370 
// Stores the configuration of this vector of memory blocks. Blocks themselves
// are created lazily (or via CreateMinBlocks()). m_Blocks uses the allocator's
// allocation callbacks for its own storage.
VmaBlockVector::VmaBlockVector(
    VmaAllocator hAllocator,
    uint32_t memoryTypeIndex,
    VMA_BLOCK_VECTOR_TYPE blockVectorType,
    VkDeviceSize preferredBlockSize,
    size_t minBlockCount,
    size_t maxBlockCount,
    VkDeviceSize bufferImageGranularity,
    uint32_t frameInUseCount,
    bool isCustomPool) :
    m_hAllocator(hAllocator),
    m_MemoryTypeIndex(memoryTypeIndex),
    m_BlockVectorType(blockVectorType),
    m_PreferredBlockSize(preferredBlockSize),
    m_MinBlockCount(minBlockCount),
    m_MaxBlockCount(maxBlockCount),
    m_BufferImageGranularity(bufferImageGranularity),
    m_FrameInUseCount(frameInUseCount),
    m_IsCustomPool(isCustomPool),
    m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    m_HasEmptyBlock(false),
    m_pDefragmentator(VMA_NULL)
{
}
    5395 
    5396 VmaBlockVector::~VmaBlockVector()
    5397 {
    5398  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
    5399 
    5400  for(size_t i = m_Blocks.size(); i--; )
    5401  {
    5402  m_Blocks[i]->Destroy(m_hAllocator);
    5403  vma_delete(m_hAllocator, m_Blocks[i]);
    5404  }
    5405 }
    5406 
    5407 VkResult VmaBlockVector::CreateMinBlocks()
    5408 {
    5409  for(size_t i = 0; i < m_MinBlockCount; ++i)
    5410  {
    5411  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
    5412  if(res != VK_SUCCESS)
    5413  {
    5414  return res;
    5415  }
    5416  }
    5417  return VK_SUCCESS;
    5418 }
    5419 
    5420 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
    5421 {
    5422  pStats->size = 0;
    5423  pStats->unusedSize = 0;
    5424  pStats->allocationCount = 0;
    5425  pStats->unusedRangeCount = 0;
    5426  pStats->unusedRangeSizeMax = 0;
    5427 
    5428  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    5429 
    5430  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    5431  {
    5432  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    5433  VMA_ASSERT(pBlock);
    5434  VMA_HEAVY_ASSERT(pBlock->Validate());
    5435  pBlock->m_Metadata.AddPoolStats(*pStats);
    5436  }
    5437 }
    5438 
// Maximum number of passes VmaBlockVector::Allocate() makes when allocating by
// making other allocations lost, before giving up with VK_ERROR_TOO_MANY_OBJECTS.
static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
    5440 
    5441 VkResult VmaBlockVector::Allocate(
    5442  VmaPool hCurrentPool,
    5443  uint32_t currentFrameIndex,
    5444  const VkMemoryRequirements& vkMemReq,
    5445  const VmaAllocationCreateInfo& createInfo,
    5446  VmaSuballocationType suballocType,
    5447  VmaAllocation* pAllocation)
    5448 {
    5449  // Validate flags.
    5450  if(createInfo.pool != VK_NULL_HANDLE &&
    5451  ((createInfo.flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0) != (m_BlockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED))
    5452  {
    5453  VMA_ASSERT(0 && "Usage of VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT must match VMA_POOL_CREATE_PERSISTENT_MAP_BIT.");
    5454  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    5455  }
    5456 
    5457  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    5458 
    5459  // 1. Search existing allocations. Try to allocate without making other allocations lost.
    5460  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    5461  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    5462  {
    5463  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    5464  VMA_ASSERT(pCurrBlock);
    5465  VmaAllocationRequest currRequest = {};
    5466  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
    5467  currentFrameIndex,
    5468  m_FrameInUseCount,
    5469  m_BufferImageGranularity,
    5470  vkMemReq.size,
    5471  vkMemReq.alignment,
    5472  suballocType,
    5473  false, // canMakeOtherLost
    5474  &currRequest))
    5475  {
    5476  // Allocate from pCurrBlock.
    5477  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
    5478 
    5479  // We no longer have an empty Allocation.
    5480  if(pCurrBlock->m_Metadata.IsEmpty())
    5481  {
    5482  m_HasEmptyBlock = false;
    5483  }
    5484 
    5485  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex);
    5486  pCurrBlock->m_Metadata.Alloc(currRequest, suballocType, vkMemReq.size, *pAllocation);
    5487  (*pAllocation)->InitBlockAllocation(
    5488  hCurrentPool,
    5489  pCurrBlock,
    5490  currRequest.offset,
    5491  vkMemReq.alignment,
    5492  vkMemReq.size,
    5493  suballocType,
    5494  createInfo.pUserData,
    5495  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    5496  VMA_HEAVY_ASSERT(pCurrBlock->Validate());
    5497  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
    5498  return VK_SUCCESS;
    5499  }
    5500  }
    5501 
    5502  const bool canCreateNewBlock =
    5503  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
    5504  (m_Blocks.size() < m_MaxBlockCount);
    5505 
    5506  // 2. Try to create new block.
    5507  if(canCreateNewBlock)
    5508  {
    5509  // 2.1. Start with full preferredBlockSize.
    5510  VkDeviceSize blockSize = m_PreferredBlockSize;
    5511  size_t newBlockIndex = 0;
    5512  VkResult res = CreateBlock(blockSize, &newBlockIndex);
    5513  // Allocating blocks of other sizes is allowed only in default pools.
    5514  // In custom pools block size is fixed.
    5515  if(res < 0 && m_IsCustomPool == false)
    5516  {
    5517  // 2.2. Try half the size.
    5518  blockSize /= 2;
    5519  if(blockSize >= vkMemReq.size)
    5520  {
    5521  res = CreateBlock(blockSize, &newBlockIndex);
    5522  if(res < 0)
    5523  {
    5524  // 2.3. Try quarter the size.
    5525  blockSize /= 2;
    5526  if(blockSize >= vkMemReq.size)
    5527  {
    5528  res = CreateBlock(blockSize, &newBlockIndex);
    5529  }
    5530  }
    5531  }
    5532  }
    5533  if(res == VK_SUCCESS)
    5534  {
    5535  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
    5536  VMA_ASSERT(pBlock->m_Metadata.GetSize() >= vkMemReq.size);
    5537 
    5538  // Allocate from pBlock. Because it is empty, dstAllocRequest can be trivially filled.
    5539  VmaAllocationRequest allocRequest;
    5540  pBlock->m_Metadata.CreateFirstAllocationRequest(&allocRequest);
    5541  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex);
    5542  pBlock->m_Metadata.Alloc(allocRequest, suballocType, vkMemReq.size, *pAllocation);
    5543  (*pAllocation)->InitBlockAllocation(
    5544  hCurrentPool,
    5545  pBlock,
    5546  allocRequest.offset,
    5547  vkMemReq.alignment,
    5548  vkMemReq.size,
    5549  suballocType,
    5550  createInfo.pUserData,
    5551  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    5552  VMA_HEAVY_ASSERT(pBlock->Validate());
    5553  VMA_DEBUG_LOG(" Created new allocation Size=%llu", allocInfo.allocationSize);
    5554 
    5555  return VK_SUCCESS;
    5556  }
    5557  }
    5558 
    5559  const bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
    5560 
    5561  // 3. Try to allocate from existing blocks with making other allocations lost.
    5562  if(canMakeOtherLost)
    5563  {
    5564  uint32_t tryIndex = 0;
    5565  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
    5566  {
    5567  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
    5568  VmaAllocationRequest bestRequest = {};
    5569  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
    5570 
    5571  // 1. Search existing allocations.
    5572  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    5573  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    5574  {
    5575  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    5576  VMA_ASSERT(pCurrBlock);
    5577  VmaAllocationRequest currRequest = {};
    5578  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
    5579  currentFrameIndex,
    5580  m_FrameInUseCount,
    5581  m_BufferImageGranularity,
    5582  vkMemReq.size,
    5583  vkMemReq.alignment,
    5584  suballocType,
    5585  canMakeOtherLost,
    5586  &currRequest))
    5587  {
    5588  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    5589  if(pBestRequestBlock == VMA_NULL ||
    5590  currRequestCost < bestRequestCost)
    5591  {
    5592  pBestRequestBlock = pCurrBlock;
    5593  bestRequest = currRequest;
    5594  bestRequestCost = currRequestCost;
    5595 
    5596  if(bestRequestCost == 0)
    5597  {
    5598  break;
    5599  }
    5600  }
    5601  }
    5602  }
    5603 
    5604  if(pBestRequestBlock != VMA_NULL)
    5605  {
    5606  if(pBestRequestBlock->m_Metadata.MakeRequestedAllocationsLost(
    5607  currentFrameIndex,
    5608  m_FrameInUseCount,
    5609  &bestRequest))
    5610  {
    5611  // We no longer have an empty Allocation.
    5612  if(pBestRequestBlock->m_Metadata.IsEmpty())
    5613  {
    5614  m_HasEmptyBlock = false;
    5615  }
    5616  // Allocate from this pBlock.
    5617  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex);
    5618  pBestRequestBlock->m_Metadata.Alloc(bestRequest, suballocType, vkMemReq.size, *pAllocation);
    5619  (*pAllocation)->InitBlockAllocation(
    5620  hCurrentPool,
    5621  pBestRequestBlock,
    5622  bestRequest.offset,
    5623  vkMemReq.alignment,
    5624  vkMemReq.size,
    5625  suballocType,
    5626  createInfo.pUserData,
    5627  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    5628  VMA_HEAVY_ASSERT(pBlock->Validate());
    5629  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
    5630  return VK_SUCCESS;
    5631  }
    5632  // else: Some allocations must have been touched while we are here. Next try.
    5633  }
    5634  else
    5635  {
    5636  // Could not find place in any of the blocks - break outer loop.
    5637  break;
    5638  }
    5639  }
    5640  /* Maximum number of tries exceeded - a very unlike event when many other
    5641  threads are simultaneously touching allocations making it impossible to make
    5642  lost at the same time as we try to allocate. */
    5643  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
    5644  {
    5645  return VK_ERROR_TOO_MANY_OBJECTS;
    5646  }
    5647  }
    5648 
    5649  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    5650 }
    5651 
    5652 void VmaBlockVector::Free(
    5653  VmaAllocation hAllocation)
    5654 {
    5655  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
    5656 
    5657  // Scope for lock.
    5658  {
    5659  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    5660 
    5661  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    5662 
    5663  pBlock->m_Metadata.Free(hAllocation);
    5664  VMA_HEAVY_ASSERT(pBlock->Validate());
    5665 
    5666  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", memTypeIndex);
    5667 
    5668  // pBlock became empty after this deallocation.
    5669  if(pBlock->m_Metadata.IsEmpty())
    5670  {
    5671  // Already has empty Allocation. We don't want to have two, so delete this one.
    5672  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
    5673  {
    5674  pBlockToDelete = pBlock;
    5675  Remove(pBlock);
    5676  }
    5677  // We now have first empty Allocation.
    5678  else
    5679  {
    5680  m_HasEmptyBlock = true;
    5681  }
    5682  }
    5683  // pBlock didn't become empty, but we have another empty block - find and free that one.
    5684  // (This is optional, heuristics.)
    5685  else if(m_HasEmptyBlock)
    5686  {
    5687  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
    5688  if(pLastBlock->m_Metadata.IsEmpty() && m_Blocks.size() > m_MinBlockCount)
    5689  {
    5690  pBlockToDelete = pLastBlock;
    5691  m_Blocks.pop_back();
    5692  m_HasEmptyBlock = false;
    5693  }
    5694  }
    5695 
    5696  IncrementallySortBlocks();
    5697  }
    5698 
    5699  // Destruction of a free Allocation. Deferred until this point, outside of mutex
    5700  // lock, for performance reason.
    5701  if(pBlockToDelete != VMA_NULL)
    5702  {
    5703  VMA_DEBUG_LOG(" Deleted empty allocation");
    5704  pBlockToDelete->Destroy(m_hAllocator);
    5705  vma_delete(m_hAllocator, pBlockToDelete);
    5706  }
    5707 }
    5708 
    5709 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
    5710 {
    5711  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    5712  {
    5713  if(m_Blocks[blockIndex] == pBlock)
    5714  {
    5715  VmaVectorRemove(m_Blocks, blockIndex);
    5716  return;
    5717  }
    5718  }
    5719  VMA_ASSERT(0);
    5720 }
    5721 
    5722 void VmaBlockVector::IncrementallySortBlocks()
    5723 {
    5724  // Bubble sort only until first swap.
    5725  for(size_t i = 1; i < m_Blocks.size(); ++i)
    5726  {
    5727  if(m_Blocks[i - 1]->m_Metadata.GetSumFreeSize() > m_Blocks[i]->m_Metadata.GetSumFreeSize())
    5728  {
    5729  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
    5730  return;
    5731  }
    5732  }
    5733 }
    5734 
// Allocates a new VkDeviceMemory of the given size, optionally maps it (for
// the MAPPED block vector type), wraps it in a VmaDeviceMemoryBlock and
// appends it to m_Blocks.
//
// On success returns VK_SUCCESS and, if pNewBlockIndex is not null, writes
// the index of the new block. On failure returns the negative VkResult and
// leaves m_Blocks unchanged (freshly allocated memory is released).
VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
{
    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
    allocInfo.allocationSize = blockSize;
    VkDeviceMemory mem = VK_NULL_HANDLE;
    VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
    if(res < 0)
    {
        return res;
    }

    // New VkDeviceMemory successfully created.

    // Map memory if needed. The mapping is skipped while persistently mapped
    // memory is globally unmapped (counter != 0); it will be re-mapped later
    // by MapPersistentlyMappedMemory().
    void* pMappedData = VMA_NULL;
    const bool persistentMap = (m_BlockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED);
    if(persistentMap && m_hAllocator->m_UnmapPersistentlyMappedMemoryCounter == 0)
    {
        res = (*m_hAllocator->GetVulkanFunctions().vkMapMemory)(
            m_hAllocator->m_hDevice,
            mem,
            0,
            VK_WHOLE_SIZE,
            0,
            &pMappedData);
        if(res < 0)
        {
            // Mapping failed: release the just-allocated device memory before
            // propagating the error, so nothing leaks.
            VMA_DEBUG_LOG(" vkMapMemory FAILED");
            m_hAllocator->FreeVulkanMemory(m_MemoryTypeIndex, blockSize, mem);
            return res;
        }
    }

    // Create new Allocation for it.
    VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
    pBlock->Init(
        m_MemoryTypeIndex,
        (VMA_BLOCK_VECTOR_TYPE)m_BlockVectorType,
        mem,
        allocInfo.allocationSize,
        persistentMap,
        pMappedData);

    m_Blocks.push_back(pBlock);
    if(pNewBlockIndex != VMA_NULL)
    {
        *pNewBlockIndex = m_Blocks.size() - 1;
    }

    return VK_SUCCESS;
}
    5787 
    5788 #if VMA_STATS_STRING_ENABLED
    5789 
// Writes a JSON object describing this block vector into the given writer.
// Custom pools emit their full configuration (memory type, block size,
// min/max/current block counts, frame-in-use count); default vectors emit
// only the preferred block size. Both variants end with a "Blocks" array
// of per-block detailed maps. Takes m_Mutex for the duration.
void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
{
    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    json.BeginObject();

    if(m_IsCustomPool)
    {
        json.WriteString("MemoryTypeIndex");
        json.WriteNumber(m_MemoryTypeIndex);

        if(m_BlockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED)
        {
            json.WriteString("Mapped");
            json.WriteBool(true);
        }

        json.WriteString("BlockSize");
        json.WriteNumber(m_PreferredBlockSize);

        // "Min"/"Max" keys are only emitted when they carry information
        // (non-zero minimum, finite maximum).
        json.WriteString("BlockCount");
        json.BeginObject(true);
        if(m_MinBlockCount > 0)
        {
            json.WriteString("Min");
            json.WriteNumber(m_MinBlockCount);
        }
        if(m_MaxBlockCount < SIZE_MAX)
        {
            json.WriteString("Max");
            json.WriteNumber(m_MaxBlockCount);
        }
        json.WriteString("Cur");
        json.WriteNumber(m_Blocks.size());
        json.EndObject();

        if(m_FrameInUseCount > 0)
        {
            json.WriteString("FrameInUseCount");
            json.WriteNumber(m_FrameInUseCount);
        }
    }
    else
    {
        json.WriteString("PreferredBlockSize");
        json.WriteNumber(m_PreferredBlockSize);
    }

    json.WriteString("Blocks");
    json.BeginArray();
    for(size_t i = 0; i < m_Blocks.size(); ++i)
    {
        m_Blocks[i]->m_Metadata.PrintDetailedMap(json);
    }
    json.EndArray();

    json.EndObject();
}
    5848 
    5849 #endif // #if VMA_STATS_STRING_ENABLED
    5850 
    5851 void VmaBlockVector::UnmapPersistentlyMappedMemory()
    5852 {
    5853  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    5854 
    5855  for(size_t i = m_Blocks.size(); i--; )
    5856  {
    5857  VmaDeviceMemoryBlock* pBlock = m_Blocks[i];
    5858  if(pBlock->m_pMappedData != VMA_NULL)
    5859  {
    5860  VMA_ASSERT(pBlock->m_PersistentMap != false);
    5861  (m_hAllocator->GetVulkanFunctions().vkUnmapMemory)(m_hAllocator->m_hDevice, pBlock->m_hMemory);
    5862  pBlock->m_pMappedData = VMA_NULL;
    5863  }
    5864  }
    5865 }
    5866 
    5867 VkResult VmaBlockVector::MapPersistentlyMappedMemory()
    5868 {
    5869  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    5870 
    5871  VkResult finalResult = VK_SUCCESS;
    5872  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
    5873  {
    5874  VmaDeviceMemoryBlock* pBlock = m_Blocks[i];
    5875  if(pBlock->m_PersistentMap)
    5876  {
    5877  VMA_ASSERT(pBlock->m_pMappedData == nullptr);
    5878  VkResult localResult = (*m_hAllocator->GetVulkanFunctions().vkMapMemory)(
    5879  m_hAllocator->m_hDevice,
    5880  pBlock->m_hMemory,
    5881  0,
    5882  VK_WHOLE_SIZE,
    5883  0,
    5884  &pBlock->m_pMappedData);
    5885  if(localResult != VK_SUCCESS)
    5886  {
    5887  finalResult = localResult;
    5888  }
    5889  }
    5890  }
    5891  return finalResult;
    5892 }
    5893 
    5894 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
    5895  VmaAllocator hAllocator,
    5896  uint32_t currentFrameIndex)
    5897 {
    5898  if(m_pDefragmentator == VMA_NULL)
    5899  {
    5900  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
    5901  hAllocator,
    5902  this,
    5903  currentFrameIndex);
    5904  }
    5905 
    5906  return m_pDefragmentator;
    5907 }
    5908 
// Runs defragmentation for this block vector via its defragmentator (no-op
// returning VK_SUCCESS if none was created), accumulates the move statistics
// into pDefragmentationStats (optional) and decrements the in/out budget
// references maxBytesToMove / maxAllocationsToMove by the amounts consumed.
// Finally destroys blocks that became empty (respecting m_MinBlockCount) and
// recomputes m_HasEmptyBlock. Takes m_Mutex for the whole operation.
VkResult VmaBlockVector::Defragment(
    VmaDefragmentationStats* pDefragmentationStats,
    VkDeviceSize& maxBytesToMove,
    uint32_t& maxAllocationsToMove)
{
    if(m_pDefragmentator == VMA_NULL)
    {
        return VK_SUCCESS;
    }

    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    // Defragment.
    VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);

    // Accumulate statistics.
    if(pDefragmentationStats != VMA_NULL)
    {
        const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
        const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
        pDefragmentationStats->bytesMoved += bytesMoved;
        pDefragmentationStats->allocationsMoved += allocationsMoved;
        // The defragmentator must not exceed the budget it was given.
        VMA_ASSERT(bytesMoved <= maxBytesToMove);
        VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
        maxBytesToMove -= bytesMoved;
        maxAllocationsToMove -= allocationsMoved;
    }

    // Free empty blocks. Iterate backwards so VmaVectorRemove doesn't shift
    // indices we still have to visit.
    m_HasEmptyBlock = false;
    for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    {
        VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
        if(pBlock->m_Metadata.IsEmpty())
        {
            if(m_Blocks.size() > m_MinBlockCount)
            {
                if(pDefragmentationStats != VMA_NULL)
                {
                    ++pDefragmentationStats->deviceMemoryBlocksFreed;
                    pDefragmentationStats->bytesFreed += pBlock->m_Metadata.GetSize();
                }

                VmaVectorRemove(m_Blocks, blockIndex);
                pBlock->Destroy(m_hAllocator);
                vma_delete(m_hAllocator, pBlock);
            }
            else
            {
                // Must keep this block to honor the minimum block count.
                m_HasEmptyBlock = true;
            }
        }
    }

    return result;
}
    5965 
    5966 void VmaBlockVector::DestroyDefragmentator()
    5967 {
    5968  if(m_pDefragmentator != VMA_NULL)
    5969  {
    5970  vma_delete(m_hAllocator, m_pDefragmentator);
    5971  m_pDefragmentator = VMA_NULL;
    5972  }
    5973 }
    5974 
    5975 void VmaBlockVector::MakePoolAllocationsLost(
    5976  uint32_t currentFrameIndex,
    5977  size_t* pLostAllocationCount)
    5978 {
    5979  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    5980 
    5981  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    5982  {
    5983  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    5984  VMA_ASSERT(pBlock);
    5985  pBlock->m_Metadata.MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
    5986  }
    5987 }
    5988 
    5989 void VmaBlockVector::AddStats(VmaStats* pStats)
    5990 {
    5991  const uint32_t memTypeIndex = m_MemoryTypeIndex;
    5992  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
    5993 
    5994  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    5995 
    5996  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    5997  {
    5998  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    5999  VMA_ASSERT(pBlock);
    6000  VMA_HEAVY_ASSERT(pBlock->Validate());
    6001  VmaStatInfo allocationStatInfo;
    6002  pBlock->m_Metadata.CalcAllocationStatInfo(allocationStatInfo);
    6003  VmaAddStatInfo(pStats->total, allocationStatInfo);
    6004  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    6005  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    6006  }
    6007 }
    6008 
    6010 // VmaDefragmentator members definition
    6011 
// Constructs a defragmentator bound to one block vector. Move counters start
// at zero; the allocation and block-info vectors use the allocator's
// user-supplied allocation callbacks.
VmaDefragmentator::VmaDefragmentator(
    VmaAllocator hAllocator,
    VmaBlockVector* pBlockVector,
    uint32_t currentFrameIndex) :
    m_hAllocator(hAllocator),
    m_pBlockVector(pBlockVector),
    m_CurrentFrameIndex(currentFrameIndex),
    m_BytesMoved(0),
    m_AllocationsMoved(0),
    m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
    m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
{
}
    6025 
    6026 VmaDefragmentator::~VmaDefragmentator()
    6027 {
    6028  for(size_t i = m_Blocks.size(); i--; )
    6029  {
    6030  vma_delete(m_hAllocator, m_Blocks[i]);
    6031  }
    6032 }
    6033 
    6034 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
    6035 {
    6036  AllocationInfo allocInfo;
    6037  allocInfo.m_hAllocation = hAlloc;
    6038  allocInfo.m_pChanged = pChanged;
    6039  m_Allocations.push_back(allocInfo);
    6040 }
    6041 
// Ensures the block's memory is mapped and returns the mapped pointer via
// ppMappedData. Three cases, in order:
//   1. already mapped for defragmentation - return cached pointer;
//   2. block is persistently mapped - reuse the block's own mapping;
//   3. otherwise map now and cache the pointer for the duration of
//      defragmentation (later released by Unmap()).
VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
{
    // It has already been mapped for defragmentation.
    if(m_pMappedDataForDefragmentation)
    {
        *ppMappedData = m_pMappedDataForDefragmentation;
        return VK_SUCCESS;
    }

    // It is persistently mapped.
    if(m_pBlock->m_PersistentMap)
    {
        VMA_ASSERT(m_pBlock->m_pMappedData != VMA_NULL);
        *ppMappedData = m_pBlock->m_pMappedData;
        return VK_SUCCESS;
    }

    // Map on first usage.
    VkResult res = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
        hAllocator->m_hDevice,
        m_pBlock->m_hMemory,
        0,
        VK_WHOLE_SIZE,
        0,
        &m_pMappedDataForDefragmentation);
    // On failure vkMapMemory leaves the output pointer unspecified; the
    // caller must check the returned VkResult before using *ppMappedData.
    *ppMappedData = m_pMappedDataForDefragmentation;
    return res;
}
    6070 
    6071 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
    6072 {
    6073  if(m_pMappedDataForDefragmentation != VMA_NULL)
    6074  {
    6075  (hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_pBlock->m_hMemory);
    6076  }
    6077 }
    6078 
// Executes one round of defragmentation: walks candidate allocations from
// the most "source" block (end of m_Blocks) to the most "destination" block
// (front), and for each tries to find a better place in a preceding block
// (or earlier in the same block), copying the data through mapped pointers.
//
// Returns VK_SUCCESS when all candidates were processed, VK_INCOMPLETE when
// the maxBytesToMove / maxAllocationsToMove budget was exhausted, or a
// negative VkResult if mapping failed.
//
// Preconditions (established by Defragment()): m_Blocks is sorted from most
// "destination" to most "source", and each block's m_Allocations is sorted
// from largest to smallest.
VkResult VmaDefragmentator::DefragmentRound(
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    if(m_Blocks.empty())
    {
        return VK_SUCCESS;
    }

    // srcAllocIndex == SIZE_MAX means "not positioned yet in this block";
    // the loop below then seeks the last allocation of the current block.
    size_t srcBlockIndex = m_Blocks.size() - 1;
    size_t srcAllocIndex = SIZE_MAX;
    for(;;)
    {
        // 1. Find next allocation to move.
        // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
        // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
        while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
        {
            if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
            {
                // Finished: no more allocations to process.
                if(srcBlockIndex == 0)
                {
                    return VK_SUCCESS;
                }
                else
                {
                    --srcBlockIndex;
                    srcAllocIndex = SIZE_MAX;
                }
            }
            else
            {
                srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
            }
        }

        BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
        AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];

        const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
        const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
        const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
        const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();

        // 2. Try to find new place for this allocation in preceding or current block.
        for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
        {
            BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
            VmaAllocationRequest dstAllocRequest;
            if(pDstBlockInfo->m_pBlock->m_Metadata.CreateAllocationRequest(
                m_CurrentFrameIndex,
                m_pBlockVector->GetFrameInUseCount(),
                m_pBlockVector->GetBufferImageGranularity(),
                size,
                alignment,
                suballocType,
                false, // canMakeOtherLost
                &dstAllocRequest) &&
                MoveMakesSense(
                    dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
            {
                // canMakeOtherLost was false, so the request must not require
                // sacrificing other allocations.
                VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);

                // Reached limit on number of allocations or bytes to move.
                if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
                    (m_BytesMoved + size > maxBytesToMove))
                {
                    return VK_INCOMPLETE;
                }

                void* pDstMappedData = VMA_NULL;
                VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
                if(res != VK_SUCCESS)
                {
                    return res;
                }

                void* pSrcMappedData = VMA_NULL;
                res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
                if(res != VK_SUCCESS)
                {
                    return res;
                }

                // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
                memcpy(
                    reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
                    reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
                    static_cast<size_t>(size));

                // Commit the move in the metadata of both blocks and retarget
                // the allocation handle to its new block/offset.
                pDstBlockInfo->m_pBlock->m_Metadata.Alloc(dstAllocRequest, suballocType, size, allocInfo.m_hAllocation);
                pSrcBlockInfo->m_pBlock->m_Metadata.Free(allocInfo.m_hAllocation);

                allocInfo.m_hAllocation->ChangeBlockAllocation(pDstBlockInfo->m_pBlock, dstAllocRequest.offset);

                if(allocInfo.m_pChanged != VMA_NULL)
                {
                    *allocInfo.m_pChanged = VK_TRUE;
                }

                ++m_AllocationsMoved;
                m_BytesMoved += size;

                VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);

                break;
            }
        }

        // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.

        if(srcAllocIndex > 0)
        {
            --srcAllocIndex;
        }
        else
        {
            if(srcBlockIndex > 0)
            {
                --srcBlockIndex;
                srcAllocIndex = SIZE_MAX;
            }
            else
            {
                return VK_SUCCESS;
            }
        }
    }
}
    6209 
// Top-level defragmentation driver. Builds per-block BlockInfo structures,
// distributes the registered candidate allocations (m_Allocations) to their
// owning blocks, sorts blocks and allocations, runs up to two
// DefragmentRound() passes within the given budget, then unmaps any memory
// that was mapped only for defragmentation.
//
// Returns VK_SUCCESS, VK_INCOMPLETE (budget exhausted) or a negative error
// from DefragmentRound(). Caller (VmaBlockVector::Defragment) holds the
// block vector's mutex.
VkResult VmaDefragmentator::Defragment(
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    if(m_Allocations.empty())
    {
        return VK_SUCCESS;
    }

    // Create block info for each block.
    const size_t blockCount = m_pBlockVector->m_Blocks.size();
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
        pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
        m_Blocks.push_back(pBlockInfo);
    }

    // Sort them by m_pBlock pointer value, so candidates can be routed to
    // their block with a binary search below.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());

    // Move allocation infos from m_Allocations to appropriate m_Blocks[memTypeIndex].m_Allocations.
    for(size_t blockIndex = 0, allocCount = m_Allocations.size(); blockIndex < allocCount; ++blockIndex)
    {
        AllocationInfo& allocInfo = m_Allocations[blockIndex];
        // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
        if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
        {
            VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
            BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
            if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
            {
                (*it)->m_Allocations.push_back(allocInfo);
            }
            else
            {
                // Every registered allocation must belong to one of the
                // vector's blocks; anything else is an internal error.
                VMA_ASSERT(0);
            }
        }
    }
    m_Allocations.clear();

    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = m_Blocks[blockIndex];
        pBlockInfo->CalcHasNonMovableAllocations();
        pBlockInfo->SortAllocationsBySizeDescecnding();
    }

    // Sort m_Blocks this time by the main criterium, from most "destination" to most "source" blocks.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());

    // Execute defragmentation rounds (the main part).
    VkResult result = VK_SUCCESS;
    for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
    {
        result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
    }

    // Unmap blocks that were mapped for defragmentation.
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        m_Blocks[blockIndex]->Unmap(m_hAllocator);
    }

    return result;
}
    6277 
    6278 bool VmaDefragmentator::MoveMakesSense(
    6279  size_t dstBlockIndex, VkDeviceSize dstOffset,
    6280  size_t srcBlockIndex, VkDeviceSize srcOffset)
    6281 {
    6282  if(dstBlockIndex < srcBlockIndex)
    6283  {
    6284  return true;
    6285  }
    6286  if(dstBlockIndex > srcBlockIndex)
    6287  {
    6288  return false;
    6289  }
    6290  if(dstOffset < srcOffset)
    6291  {
    6292  return true;
    6293  }
    6294  return false;
    6295 }
    6296 
    6298 // VmaAllocator_T
    6299 
// Constructs the allocator: caches flags and handles from pCreateInfo,
// imports Vulkan function pointers, queries physical-device and memory
// properties, applies optional per-heap size limits, and creates one
// VmaBlockVector plus one dedicated-allocation list per
// (memory type, block vector type) pair.
VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
    m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
    m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
    m_PhysicalDevice(pCreateInfo->physicalDevice),
    m_hDevice(pCreateInfo->device),
    m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
    m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
        *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
    m_UnmapPersistentlyMappedMemoryCounter(0),
    m_PreferredLargeHeapBlockSize(0),
    m_PreferredSmallHeapBlockSize(0),
    m_CurrentFrameIndex(0),
    m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks()))
{
    VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);

    memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
    memset(&m_MemProps, 0, sizeof(m_MemProps));
    memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));

    memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
    memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));

    // VK_WHOLE_SIZE means "no limit" for a heap.
    for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    {
        m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
    }

    if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
    {
        m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
        m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
    }

    ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);

    (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
    (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);

    // Fall back to library defaults when the user passed 0 for block sizes.
    m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
        pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
    m_PreferredSmallHeapBlockSize = (pCreateInfo->preferredSmallHeapBlockSize != 0) ?
        pCreateInfo->preferredSmallHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE);

    if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
    {
        for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
        {
            const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
            if(limit != VK_WHOLE_SIZE)
            {
                m_HeapSizeLimit[heapIndex] = limit;
                // Also clamp the reported heap size so block-size heuristics
                // see the limited size.
                if(limit < m_MemProps.memoryHeaps[heapIndex].size)
                {
                    m_MemProps.memoryHeaps[heapIndex].size = limit;
                }
            }
        }
    }

    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);

        for(size_t blockVectorTypeIndex = 0; blockVectorTypeIndex < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorTypeIndex)
        {
            m_pBlockVectors[memTypeIndex][blockVectorTypeIndex] = vma_new(this, VmaBlockVector)(
                this,
                memTypeIndex,
                static_cast<VMA_BLOCK_VECTOR_TYPE>(blockVectorTypeIndex),
                preferredBlockSize,
                0,
                SIZE_MAX,
                GetBufferImageGranularity(),
                pCreateInfo->frameInUseCount,
                false); // isCustomPool
            // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
            // because minBlockCount is 0.
            m_pDedicatedAllocations[memTypeIndex][blockVectorTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
        }
    }
}
    6382 
    6383 VmaAllocator_T::~VmaAllocator_T()
    6384 {
    6385  VMA_ASSERT(m_Pools.empty());
    6386 
    6387  for(size_t i = GetMemoryTypeCount(); i--; )
    6388  {
    6389  for(size_t j = VMA_BLOCK_VECTOR_TYPE_COUNT; j--; )
    6390  {
    6391  vma_delete(this, m_pDedicatedAllocations[i][j]);
    6392  vma_delete(this, m_pBlockVectors[i][j]);
    6393  }
    6394  }
    6395 }
    6396 
// Fills m_VulkanFunctions. With VMA_STATIC_VULKAN_FUNCTIONS == 1 the
// statically linked entry points are taken as defaults; any non-null pointer
// in pVulkanFunctions (optional) then overrides them. Finally asserts that
// every required function is set; the two *2KHR functions are required only
// when VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT was used.
void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
{
#if VMA_STATIC_VULKAN_FUNCTIONS == 1
    m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
    m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
    m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
    m_VulkanFunctions.vkMapMemory = &vkMapMemory;
    m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
    m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
    m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
    m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
    m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
    m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
    m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
    m_VulkanFunctions.vkCreateImage = &vkCreateImage;
    m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
    // Ignoring vkGetBufferMemoryRequirements2KHR.
    // Ignoring vkGetImageMemoryRequirements2KHR.
#endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1

// Copies one user-provided pointer over the default, but only if non-null.
#define VMA_COPY_IF_NOT_NULL(funcName) \
    if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;

    if(pVulkanFunctions != VMA_NULL)
    {
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
        VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
        VMA_COPY_IF_NOT_NULL(vkFreeMemory);
        VMA_COPY_IF_NOT_NULL(vkMapMemory);
        VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
        VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
        VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
        VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
        VMA_COPY_IF_NOT_NULL(vkCreateImage);
        VMA_COPY_IF_NOT_NULL(vkDestroyImage);
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
    }

#undef VMA_COPY_IF_NOT_NULL

    // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
    // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
    if(m_UseKhrDedicatedAllocation)
    {
        VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
        VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
    }
}
    6465 
    6466 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
    6467 {
    6468  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    6469  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
    6470  return (heapSize <= VMA_SMALL_HEAP_MAX_SIZE) ?
    6471  m_PreferredSmallHeapBlockSize : m_PreferredLargeHeapBlockSize;
    6472 }
    6473 
    6474 VkResult VmaAllocator_T::AllocateMemoryOfType(
    6475  const VkMemoryRequirements& vkMemReq,
    6476  bool dedicatedAllocation,
    6477  VkBuffer dedicatedBuffer,
    6478  VkImage dedicatedImage,
    6479  const VmaAllocationCreateInfo& createInfo,
    6480  uint32_t memTypeIndex,
    6481  VmaSuballocationType suballocType,
    6482  VmaAllocation* pAllocation)
    6483 {
    6484  VMA_ASSERT(pAllocation != VMA_NULL);
    6485  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
    6486 
    6487  uint32_t blockVectorType = VmaAllocationCreateFlagsToBlockVectorType(createInfo.flags);
    6488  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex][blockVectorType];
    6489  VMA_ASSERT(blockVector);
    6490 
    6491  VmaAllocationCreateInfo finalCreateInfo = createInfo;
    6492 
    6493  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
    6494  bool preferDedicatedMemory =
    6495  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
    6496  dedicatedAllocation ||
    6497  // Heuristics: Allocate dedicated memory if requested size if greater than half of preferred block size.
    6498  vkMemReq.size > preferredBlockSize / 2;
    6499 
    6500  if(preferDedicatedMemory &&
    6501  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
    6502  finalCreateInfo.pool == VK_NULL_HANDLE)
    6503  {
    6505  }
    6506 
    6507  // If memory type is not HOST_VISIBLE, disable PERSISTENT_MAP.
    6508  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0 &&
    6509  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    6510  {
    6511  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT;
    6512  }
    6513 
    6514  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
    6515  {
    6516  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    6517  {
    6518  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    6519  }
    6520  else
    6521  {
    6522  return AllocateDedicatedMemory(
    6523  vkMemReq.size,
    6524  suballocType,
    6525  memTypeIndex,
    6526  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0,
    6527  finalCreateInfo.pUserData,
    6528  dedicatedBuffer,
    6529  dedicatedImage,
    6530  pAllocation);
    6531  }
    6532  }
    6533  else
    6534  {
    6535  VkResult res = blockVector->Allocate(
    6536  VK_NULL_HANDLE, // hCurrentPool
    6537  m_CurrentFrameIndex.load(),
    6538  vkMemReq,
    6539  finalCreateInfo,
    6540  suballocType,
    6541  pAllocation);
    6542  if(res == VK_SUCCESS)
    6543  {
    6544  return res;
    6545  }
    6546 
    6547  // 5. Try dedicated memory.
    6548  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    6549  {
    6550  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    6551  }
    6552  else
    6553  {
    6554  res = AllocateDedicatedMemory(
    6555  vkMemReq.size,
    6556  suballocType,
    6557  memTypeIndex,
    6558  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0,
    6559  finalCreateInfo.pUserData,
    6560  dedicatedBuffer,
    6561  dedicatedImage,
    6562  pAllocation);
    6563  if(res == VK_SUCCESS)
    6564  {
    6565  // Succeeded: AllocateDedicatedMemory function already filld pMemory, nothing more to do here.
    6566  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
    6567  return VK_SUCCESS;
    6568  }
    6569  else
    6570  {
    6571  // Everything failed: Return error code.
    6572  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    6573  return res;
    6574  }
    6575  }
    6576  }
    6577 }
    6578 
// Allocates one whole VkDeviceMemory block dedicated to a single resource
// (no suballocation), optionally maps it persistently, and registers the
// resulting allocation in m_pDedicatedAllocations[memTypeIndex].
// dedicatedBuffer/dedicatedImage: at most one may be non-null; when
// VK_KHR_dedicated_allocation is in use, the handle is chained into
// VkMemoryAllocateInfo so the driver can dedicate the memory to it.
// Returns a negative error code on failure; on success fills *pAllocation.
VkResult VmaAllocator_T::AllocateDedicatedMemory(
    VkDeviceSize size,
    VmaSuballocationType suballocType,
    uint32_t memTypeIndex,
    bool map,
    void* pUserData,
    VkBuffer dedicatedBuffer,
    VkImage dedicatedImage,
    VmaAllocation* pAllocation)
{
    VMA_ASSERT(pAllocation);

    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    allocInfo.memoryTypeIndex = memTypeIndex;
    allocInfo.allocationSize = size;

    // Chain VkMemoryDedicatedAllocateInfoKHR when the extension is enabled
    // and a concrete resource handle was provided.
    VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
    if(m_UseKhrDedicatedAllocation)
    {
        if(dedicatedBuffer != VK_NULL_HANDLE)
        {
            VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
            dedicatedAllocInfo.buffer = dedicatedBuffer;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
        else if(dedicatedImage != VK_NULL_HANDLE)
        {
            dedicatedAllocInfo.image = dedicatedImage;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
    }

    // Allocate VkDeviceMemory.
    VkDeviceMemory hMemory = VK_NULL_HANDLE;
    VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
    if(res < 0)
    {
        VMA_DEBUG_LOG("    vkAllocateMemory FAILED");
        return res;
    }

    void* pMappedData = nullptr;
    if(map)
    {
        // Skip mapping while inside vmaUnmapPersistentlyMappedMemory();
        // the memory is expected to be mapped later when the counter
        // returns to zero (see MapPersistentlyMappedMemory()).
        if(m_UnmapPersistentlyMappedMemoryCounter == 0)
        {
            res = (*m_VulkanFunctions.vkMapMemory)(
                m_hDevice,
                hMemory,
                0,
                VK_WHOLE_SIZE,
                0,
                &pMappedData);
            if(res < 0)
            {
                VMA_DEBUG_LOG("    vkMapMemory FAILED");
                // Roll back the device memory allocation on mapping failure.
                FreeVulkanMemory(memTypeIndex, size, hMemory);
                return res;
            }
        }
    }

    *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load());
    (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, map, pMappedData, size, pUserData);

    // Register it in m_pDedicatedAllocations.
    {
        VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex][map ? VMA_BLOCK_VECTOR_TYPE_MAPPED : VMA_BLOCK_VECTOR_TYPE_UNMAPPED];
        VMA_ASSERT(pDedicatedAllocations);
        VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
    }

    // Succeeded: AllocateDedicatedMemory already filled *pAllocation, nothing more to do here.
    VMA_DEBUG_LOG("    Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);

    return VK_SUCCESS;
}
    6656 
    6657 void VmaAllocator_T::GetBufferMemoryRequirements(
    6658  VkBuffer hBuffer,
    6659  VkMemoryRequirements& memReq,
    6660  bool& requiresDedicatedAllocation,
    6661  bool& prefersDedicatedAllocation) const
    6662 {
    6663  if(m_UseKhrDedicatedAllocation)
    6664  {
    6665  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
    6666  memReqInfo.buffer = hBuffer;
    6667 
    6668  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    6669 
    6670  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    6671  memReq2.pNext = &memDedicatedReq;
    6672 
    6673  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    6674 
    6675  memReq = memReq2.memoryRequirements;
    6676  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    6677  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    6678  }
    6679  else
    6680  {
    6681  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
    6682  requiresDedicatedAllocation = false;
    6683  prefersDedicatedAllocation = false;
    6684  }
    6685 }
    6686 
    6687 void VmaAllocator_T::GetImageMemoryRequirements(
    6688  VkImage hImage,
    6689  VkMemoryRequirements& memReq,
    6690  bool& requiresDedicatedAllocation,
    6691  bool& prefersDedicatedAllocation) const
    6692 {
    6693  if(m_UseKhrDedicatedAllocation)
    6694  {
    6695  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
    6696  memReqInfo.image = hImage;
    6697 
    6698  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    6699 
    6700  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    6701  memReq2.pNext = &memDedicatedReq;
    6702 
    6703  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    6704 
    6705  memReq = memReq2.memoryRequirements;
    6706  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    6707  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    6708  }
    6709  else
    6710  {
    6711  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
    6712  requiresDedicatedAllocation = false;
    6713  prefersDedicatedAllocation = false;
    6714  }
    6715 }
    6716 
// Main entry point for allocating memory for a resource.
// Validates the flag combination, then either allocates from the user's
// custom pool (createInfo.pool) or picks a memory type via
// vmaFindMemoryTypeIndex() and delegates to AllocateMemoryOfType(),
// retrying with the next compatible memory type on failure.
// requiresDedicatedAllocation / prefersDedicatedAllocation come from the
// VK_KHR_dedicated_allocation query (see Get*MemoryRequirements above).
VkResult VmaAllocator_T::AllocateMemory(
    const VkMemoryRequirements& vkMemReq,
    bool requiresDedicatedAllocation,
    bool prefersDedicatedAllocation,
    VkBuffer dedicatedBuffer,
    VkImage dedicatedImage,
    const VmaAllocationCreateInfo& createInfo,
    VmaSuballocationType suballocType,
    VmaAllocation* pAllocation)
{
    // Mutually exclusive flags: DEDICATED_MEMORY forces a new vkAllocateMemory
    // call, NEVER_ALLOCATE forbids one.
    if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
        (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    {
        VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
        return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    }
    // If the driver requires a dedicated allocation, neither NEVER_ALLOCATE
    // nor a custom pool (which only suballocates) can be honored.
    if(requiresDedicatedAllocation)
    {
        if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
        {
            VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
            return VK_ERROR_OUT_OF_DEVICE_MEMORY;
        }
        if(createInfo.pool != VK_NULL_HANDLE)
        {
            VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
            return VK_ERROR_OUT_OF_DEVICE_MEMORY;
        }
    }
    // Custom pools always suballocate, so DEDICATED_MEMORY is invalid with a pool.
    if((createInfo.pool != VK_NULL_HANDLE) &&
        ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
    {
        VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
        return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    }

    if(createInfo.pool != VK_NULL_HANDLE)
    {
        // Custom pool: allocate straight from its block vector.
        return createInfo.pool->m_BlockVector.Allocate(
            createInfo.pool,
            m_CurrentFrameIndex.load(),
            vkMemReq,
            createInfo,
            suballocType,
            pAllocation);
    }
    else
    {
        // Bit mask of memory Vulkan types acceptable for this allocation.
        uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
        uint32_t memTypeIndex = UINT32_MAX;
        VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
        if(res == VK_SUCCESS)
        {
            res = AllocateMemoryOfType(
                vkMemReq,
                requiresDedicatedAllocation || prefersDedicatedAllocation,
                dedicatedBuffer,
                dedicatedImage,
                createInfo,
                memTypeIndex,
                suballocType,
                pAllocation);
            // Succeeded on first try.
            if(res == VK_SUCCESS)
            {
                return res;
            }
            // Allocation from this memory type failed. Try other compatible memory types.
            else
            {
                for(;;)
                {
                    // Remove old memTypeIndex from list of possibilities.
                    memoryTypeBits &= ~(1u << memTypeIndex);
                    // Find alternative memTypeIndex.
                    res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
                    if(res == VK_SUCCESS)
                    {
                        res = AllocateMemoryOfType(
                            vkMemReq,
                            requiresDedicatedAllocation || prefersDedicatedAllocation,
                            dedicatedBuffer,
                            dedicatedImage,
                            createInfo,
                            memTypeIndex,
                            suballocType,
                            pAllocation);
                        // Allocation from this alternative memory type succeeded.
                        if(res == VK_SUCCESS)
                        {
                            return res;
                        }
                        // else: Allocation from this memory type failed. Try next one - next loop iteration.
                    }
                    // No other matching memory type index could be found.
                    else
                    {
                        // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
                        return VK_ERROR_OUT_OF_DEVICE_MEMORY;
                    }
                }
            }
        }
        // Can't find any single memory type maching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
        else
            return res;
    }
}
    6826 
// Releases the memory behind `allocation` and destroys the allocation
// object itself. Counterpart of AllocateMemory().
void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
{
    VMA_ASSERT(allocation);

    // An allocation that has already become lost no longer owns memory;
    // in that case only the VmaAllocation_T object is destroyed below.
    if(allocation->CanBecomeLost() == false ||
        allocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
    {
        switch(allocation->GetType())
        {
        case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
            {
                // Block allocations live either in a custom pool's block
                // vector or in one of the default per-memory-type vectors.
                VmaBlockVector* pBlockVector = VMA_NULL;
                VmaPool hPool = allocation->GetPool();
                if(hPool != VK_NULL_HANDLE)
                {
                    pBlockVector = &hPool->m_BlockVector;
                }
                else
                {
                    const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
                    const VMA_BLOCK_VECTOR_TYPE blockVectorType = allocation->GetBlockVectorType();
                    pBlockVector = m_pBlockVectors[memTypeIndex][blockVectorType];
                }
                pBlockVector->Free(allocation);
            }
            break;
        case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
            FreeDedicatedMemory(allocation);
            break;
        default:
            VMA_ASSERT(0);
        }
    }

    vma_delete(this, allocation);
}
    6863 
    6864 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
    6865 {
    6866  // Initialize.
    6867  InitStatInfo(pStats->total);
    6868  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
    6869  InitStatInfo(pStats->memoryType[i]);
    6870  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    6871  InitStatInfo(pStats->memoryHeap[i]);
    6872 
    6873  // Process default pools.
    6874  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    6875  {
    6876  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    6877  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
    6878  {
    6879  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex][blockVectorType];
    6880  VMA_ASSERT(pBlockVector);
    6881  pBlockVector->AddStats(pStats);
    6882  }
    6883  }
    6884 
    6885  // Process custom pools.
    6886  {
    6887  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    6888  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    6889  {
    6890  m_Pools[poolIndex]->GetBlockVector().AddStats(pStats);
    6891  }
    6892  }
    6893 
    6894  // Process dedicated allocations.
    6895  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    6896  {
    6897  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    6898  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    6899  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
    6900  {
    6901  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex][blockVectorType];
    6902  VMA_ASSERT(pDedicatedAllocVector);
    6903  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
    6904  {
    6905  VmaStatInfo allocationStatInfo;
    6906  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
    6907  VmaAddStatInfo(pStats->total, allocationStatInfo);
    6908  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    6909  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    6910  }
    6911  }
    6912  }
    6913 
    6914  // Postprocess.
    6915  VmaPostprocessCalcStatInfo(pStats->total);
    6916  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
    6917  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
    6918  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
    6919  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
    6920 }
    6921 
// 4098 == 0x1002, AMD's PCI vendor ID. Compared against
// VkPhysicalDeviceProperties::vendorID below: the persistently-mapped-memory
// unmap/remap logic is applied only on AMD devices.
static const uint32_t VMA_VENDOR_ID_AMD = 4098;
    6923 
// Temporarily unmaps all persistently mapped memory. Reference-counted:
// only the outermost call (counter transition 0 -> 1) does any work.
// The unmapping is applied only on AMD devices (see VMA_VENDOR_ID_AMD)
// and only to memory types that are both HOST_VISIBLE and DEVICE_LOCAL.
// Paired with MapPersistentlyMappedMemory() below.
void VmaAllocator_T::UnmapPersistentlyMappedMemory()
{
    if(m_UnmapPersistentlyMappedMemoryCounter++ == 0)
    {
        if(m_PhysicalDeviceProperties.vendorID == VMA_VENDOR_ID_AMD)
        {
            // Iterate memory types in reverse order.
            for(uint32_t memTypeIndex = m_MemProps.memoryTypeCount; memTypeIndex--; )
            {
                const VkMemoryPropertyFlags memFlags = m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
                if((memFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0 &&
                    (memFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
                {
                    // Process DedicatedAllocations.
                    {
                        VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
                        AllocationVectorType* pDedicatedAllocationsVector = m_pDedicatedAllocations[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
                        for(size_t dedicatedAllocIndex = pDedicatedAllocationsVector->size(); dedicatedAllocIndex--; )
                        {
                            VmaAllocation hAlloc = (*pDedicatedAllocationsVector)[dedicatedAllocIndex];
                            hAlloc->DedicatedAllocUnmapPersistentlyMappedMemory(this);
                        }
                    }

                    // Process normal Allocations.
                    {
                        VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
                        pBlockVector->UnmapPersistentlyMappedMemory();
                    }
                }
            }

            // Process custom pools.
            {
                VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
                for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
                {
                    m_Pools[poolIndex]->GetBlockVector().UnmapPersistentlyMappedMemory();
                }
            }
        }
    }
}
    6966 
// Re-maps memory previously unmapped by UnmapPersistentlyMappedMemory().
// Reference-counted: only the last matching call (counter transition
// 1 -> 0) does any work. Returns the last non-success result from block
// vector re-mapping, or VK_SUCCESS. As above, the work applies only on
// AMD devices and only to HOST_VISIBLE | DEVICE_LOCAL memory types.
VkResult VmaAllocator_T::MapPersistentlyMappedMemory()
{
    VMA_ASSERT(m_UnmapPersistentlyMappedMemoryCounter > 0);
    if(--m_UnmapPersistentlyMappedMemoryCounter == 0)
    {
        VkResult finalResult = VK_SUCCESS;
        if(m_PhysicalDeviceProperties.vendorID == VMA_VENDOR_ID_AMD)
        {
            // Process custom pools.
            {
                VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
                for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
                {
                    m_Pools[poolIndex]->GetBlockVector().MapPersistentlyMappedMemory();
                }
            }

            for(uint32_t memTypeIndex = 0; memTypeIndex < m_MemProps.memoryTypeCount; ++memTypeIndex)
            {
                const VkMemoryPropertyFlags memFlags = m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
                if((memFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0 &&
                    (memFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
                {
                    // Process DedicatedAllocations.
                    {
                        VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
                        AllocationVectorType* pAllocationsVector = m_pDedicatedAllocations[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
                        for(size_t dedicatedAllocIndex = 0, dedicatedAllocCount = pAllocationsVector->size(); dedicatedAllocIndex < dedicatedAllocCount; ++dedicatedAllocIndex)
                        {
                            VmaAllocation hAlloc = (*pAllocationsVector)[dedicatedAllocIndex];
                            hAlloc->DedicatedAllocMapPersistentlyMappedMemory(this);
                        }
                    }

                    // Process normal Allocations.
                    {
                        VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
                        VkResult localResult = pBlockVector->MapPersistentlyMappedMemory();
                        if(localResult != VK_SUCCESS)
                        {
                            // Remember the failure but keep mapping the rest.
                            finalResult = localResult;
                        }
                    }
                }
            }
        }
        return finalResult;
    }
    else
        return VK_SUCCESS;
}
    7018 
    7019 VkResult VmaAllocator_T::Defragment(
    7020  VmaAllocation* pAllocations,
    7021  size_t allocationCount,
    7022  VkBool32* pAllocationsChanged,
    7023  const VmaDefragmentationInfo* pDefragmentationInfo,
    7024  VmaDefragmentationStats* pDefragmentationStats)
    7025 {
    7026  if(pAllocationsChanged != VMA_NULL)
    7027  {
    7028  memset(pAllocationsChanged, 0, sizeof(*pAllocationsChanged));
    7029  }
    7030  if(pDefragmentationStats != VMA_NULL)
    7031  {
    7032  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
    7033  }
    7034 
    7035  if(m_UnmapPersistentlyMappedMemoryCounter > 0)
    7036  {
    7037  VMA_DEBUG_LOG("ERROR: Cannot defragment when inside vmaUnmapPersistentlyMappedMemory.");
    7038  return VK_ERROR_MEMORY_MAP_FAILED;
    7039  }
    7040 
    7041  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
    7042 
    7043  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
    7044 
    7045  const size_t poolCount = m_Pools.size();
    7046 
    7047  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
    7048  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    7049  {
    7050  VmaAllocation hAlloc = pAllocations[allocIndex];
    7051  VMA_ASSERT(hAlloc);
    7052  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
    7053  // DedicatedAlloc cannot be defragmented.
    7054  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
    7055  // Only HOST_VISIBLE memory types can be defragmented.
    7056  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) &&
    7057  // Lost allocation cannot be defragmented.
    7058  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
    7059  {
    7060  VmaBlockVector* pAllocBlockVector = nullptr;
    7061 
    7062  const VmaPool hAllocPool = hAlloc->GetPool();
    7063  // This allocation belongs to custom pool.
    7064  if(hAllocPool != VK_NULL_HANDLE)
    7065  {
    7066  pAllocBlockVector = &hAllocPool->GetBlockVector();
    7067  }
    7068  // This allocation belongs to general pool.
    7069  else
    7070  {
    7071  pAllocBlockVector = m_pBlockVectors[memTypeIndex][hAlloc->GetBlockVectorType()];
    7072  }
    7073 
    7074  VmaDefragmentator* const pDefragmentator = pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
    7075 
    7076  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
    7077  &pAllocationsChanged[allocIndex] : VMA_NULL;
    7078  pDefragmentator->AddAllocation(hAlloc, pChanged);
    7079  }
    7080  }
    7081 
    7082  VkResult result = VK_SUCCESS;
    7083 
    7084  // ======== Main processing.
    7085 
    7086  VkDeviceSize maxBytesToMove = SIZE_MAX;
    7087  uint32_t maxAllocationsToMove = UINT32_MAX;
    7088  if(pDefragmentationInfo != VMA_NULL)
    7089  {
    7090  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
    7091  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
    7092  }
    7093 
    7094  // Process standard memory.
    7095  for(uint32_t memTypeIndex = 0;
    7096  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
    7097  ++memTypeIndex)
    7098  {
    7099  // Only HOST_VISIBLE memory types can be defragmented.
    7100  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    7101  {
    7102  for(uint32_t blockVectorType = 0;
    7103  (blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT) && (result == VK_SUCCESS);
    7104  ++blockVectorType)
    7105  {
    7106  result = m_pBlockVectors[memTypeIndex][blockVectorType]->Defragment(
    7107  pDefragmentationStats,
    7108  maxBytesToMove,
    7109  maxAllocationsToMove);
    7110  }
    7111  }
    7112  }
    7113 
    7114  // Process custom pools.
    7115  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
    7116  {
    7117  result = m_Pools[poolIndex]->GetBlockVector().Defragment(
    7118  pDefragmentationStats,
    7119  maxBytesToMove,
    7120  maxAllocationsToMove);
    7121  }
    7122 
    7123  // ======== Destroy defragmentators.
    7124 
    7125  // Process custom pools.
    7126  for(size_t poolIndex = poolCount; poolIndex--; )
    7127  {
    7128  m_Pools[poolIndex]->GetBlockVector().DestroyDefragmentator();
    7129  }
    7130 
    7131  // Process standard memory.
    7132  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
    7133  {
    7134  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    7135  {
    7136  for(size_t blockVectorType = VMA_BLOCK_VECTOR_TYPE_COUNT; blockVectorType--; )
    7137  {
    7138  m_pBlockVectors[memTypeIndex][blockVectorType]->DestroyDefragmentator();
    7139  }
    7140  }
    7141  }
    7142 
    7143  return result;
    7144 }
    7145 
// Fills *pAllocationInfo with the current parameters of hAllocation.
// For allocations that can become lost, this also "touches" the allocation:
// its last-use frame index is advanced to the current frame via
// compare-exchange, so the allocation is protected from becoming lost in
// the current frame.
void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
{
    if(hAllocation->CanBecomeLost())
    {
        /*
        Warning: This is a carefully designed algorithm.
        Do not modify unless you really know what you're doing :)
        */
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        // Loop until the allocation is observed as lost, observed as already
        // touched this frame, or successfully touched by our CAS.
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                // Allocation is lost: report null/empty parameters.
                pAllocationInfo->memoryType = UINT32_MAX;
                pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
                pAllocationInfo->offset = 0;
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                // Already stamped with the current frame: safe to report.
                pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
                pAllocationInfo->deviceMemory = hAllocation->GetMemory();
                pAllocationInfo->offset = hAllocation->GetOffset();
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = hAllocation->GetMappedData();
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else // Last use time earlier than current time.
            {
                // Try to stamp the allocation with the current frame; on
                // failure re-read and retry on the next iteration.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    // We could use the same code here, but for performance reasons we don't need to use the hAllocation.LastUseFrameIndex atomic.
    else
    {
        pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
        pAllocationInfo->deviceMemory = hAllocation->GetMemory();
        pAllocationInfo->offset = hAllocation->GetOffset();
        pAllocationInfo->size = hAllocation->GetSize();
        pAllocationInfo->pMappedData = hAllocation->GetMappedData();
        pAllocationInfo->pUserData = hAllocation->GetUserData();
    }
}
    7198 
    7199 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
    7200 {
    7201  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u", pCreateInfo->memoryTypeIndex);
    7202 
    7203  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
    7204 
    7205  if(newCreateInfo.maxBlockCount == 0)
    7206  {
    7207  newCreateInfo.maxBlockCount = SIZE_MAX;
    7208  }
    7209  if(newCreateInfo.blockSize == 0)
    7210  {
    7211  newCreateInfo.blockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
    7212  }
    7213 
    7214  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo);
    7215 
    7216  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
    7217  if(res != VK_SUCCESS)
    7218  {
    7219  vma_delete(this, *pPool);
    7220  *pPool = VMA_NULL;
    7221  return res;
    7222  }
    7223 
    7224  // Add to m_Pools.
    7225  {
    7226  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    7227  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
    7228  }
    7229 
    7230  return VK_SUCCESS;
    7231 }
    7232 
    7233 void VmaAllocator_T::DestroyPool(VmaPool pool)
    7234 {
    7235  // Remove from m_Pools.
    7236  {
    7237  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    7238  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
    7239  VMA_ASSERT(success && "Pool not found in Allocator.");
    7240  }
    7241 
    7242  vma_delete(this, pool);
    7243 }
    7244 
    7245 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
    7246 {
    7247  pool->m_BlockVector.GetPoolStats(pPoolStats);
    7248 }
    7249 
// Stores the application-provided frame index (atomically). The current
// frame index is read by allocation paths and by the lost-allocation
// logic (see GetAllocationInfo) to time-stamp allocation use.
void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
{
    m_CurrentFrameIndex.store(frameIndex);
}
    7254 
    7255 void VmaAllocator_T::MakePoolAllocationsLost(
    7256  VmaPool hPool,
    7257  size_t* pLostAllocationCount)
    7258 {
    7259  hPool->m_BlockVector.MakePoolAllocationsLost(
    7260  m_CurrentFrameIndex.load(),
    7261  pLostAllocationCount);
    7262 }
    7263 
    7264 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
    7265 {
    7266  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST);
    7267  (*pAllocation)->InitLost();
    7268 }
    7269 
// Calls vkAllocateMemory, enforcing the optional per-heap size limit
// (m_HeapSizeLimit) and notifying the user's pfnAllocate device-memory
// callback on success. Counterpart of FreeVulkanMemory().
VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
{
    const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);

    VkResult res;
    // VK_WHOLE_SIZE in m_HeapSizeLimit means "no limit" for that heap.
    if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    {
        // Check and update the remaining budget under the mutex so
        // concurrent allocations observe a consistent limit.
        VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
        if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
        {
            res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
            if(res == VK_SUCCESS)
            {
                m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
            }
        }
        else
        {
            // Allocation would exceed the user-imposed limit: fail without
            // calling the driver.
            res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
        }
    }
    else
    {
        res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    }

    // Inform the user's callback about the successful allocation.
    if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
    {
        (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
    }

    return res;
}
    7303 
    7304 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
    7305 {
    7306  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
    7307  {
    7308  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
    7309  }
    7310 
    7311  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
    7312 
    7313  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
    7314  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    7315  {
    7316  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    7317  m_HeapSizeLimit[heapIndex] += size;
    7318  }
    7319 }
    7320 
// Releases a dedicated allocation: unregisters it from
// m_pDedicatedAllocations, unmaps it if it was mapped, then frees the
// underlying VkDeviceMemory. The order matters: unmap must happen before
// FreeVulkanMemory(). Does NOT destroy the VmaAllocation_T object itself
// (FreeMemory() does that).
void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
{
    VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);

    const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    {
        VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex][allocation->GetBlockVectorType()];
        VMA_ASSERT(pDedicatedAllocations);
        bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
        VMA_ASSERT(success);
    }

    VkDeviceMemory hMemory = allocation->GetMemory();

    // A non-null mapped pointer means the memory is currently mapped.
    if(allocation->GetMappedData() != VMA_NULL)
    {
        (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    }

    FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);

    VMA_DEBUG_LOG("    Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
}
    7345 
    7346 #if VMA_STATS_STRING_ENABLED
    7347 
// Writes a detailed JSON dump of all memory owned by this allocator into
// `json`: dedicated allocations, default-pool block vectors, and custom
// pools. Assumes the caller has already begun the surrounding JSON object;
// this function writes key/value pairs into it.
void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
{
    // "DedicatedAllocations" section - opened lazily, only when at least one
    // dedicated allocation exists in any memory type / block vector type.
    bool dedicatedAllocationsStarted = false;
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        // Lock per memory type while reading its dedicated-allocations vector.
        VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
        {
            AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex][blockVectorType];
            VMA_ASSERT(pDedicatedAllocVector);
            if(pDedicatedAllocVector->empty() == false)
            {
                if(dedicatedAllocationsStarted == false)
                {
                    dedicatedAllocationsStarted = true;
                    json.WriteString("DedicatedAllocations");
                    json.BeginObject();
                }

                // Key is e.g. "Type 3" or "Type 3 Mapped".
                json.BeginString("Type ");
                json.ContinueString(memTypeIndex);
                if(blockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED)
                {
                    json.ContinueString(" Mapped");
                }
                json.EndString();

                json.BeginArray();

                // One small object per dedicated allocation: size + suballocation type.
                for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
                {
                    const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
                    json.BeginObject(true);

                    json.WriteString("Size");
                    json.WriteNumber(hAlloc->GetSize());

                    json.WriteString("Type");
                    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[hAlloc->GetSuballocationType()]);

                    json.EndObject();
                }

                json.EndArray();
            }
        }
    }
    if(dedicatedAllocationsStarted)
    {
        json.EndObject();
    }

    // "DefaultPools" section - non-empty block vectors of the default pools.
    {
        bool allocationsStarted = false;
        for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
        {
            for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
            {
                if(m_pBlockVectors[memTypeIndex][blockVectorType]->IsEmpty() == false)
                {
                    // Open the object lazily, same pattern as above.
                    if(allocationsStarted == false)
                    {
                        allocationsStarted = true;
                        json.WriteString("DefaultPools");
                        json.BeginObject();
                    }

                    json.BeginString("Type ");
                    json.ContinueString(memTypeIndex);
                    if(blockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED)
                    {
                        json.ContinueString(" Mapped");
                    }
                    json.EndString();

                    // The block vector prints its own detailed contents.
                    m_pBlockVectors[memTypeIndex][blockVectorType]->PrintDetailedMap(json);
                }
            }
        }
        if(allocationsStarted)
        {
            json.EndObject();
        }
    }

    // "Pools" section - custom pools created with vmaCreatePool().
    {
        VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
        const size_t poolCount = m_Pools.size();
        if(poolCount > 0)
        {
            json.WriteString("Pools");
            json.BeginArray();
            for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
            {
                m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
            }
            json.EndArray();
        }
    }
}
    7448 
    7449 #endif // #if VMA_STATS_STRING_ENABLED
    7450 
    7451 static VkResult AllocateMemoryForImage(
    7452  VmaAllocator allocator,
    7453  VkImage image,
    7454  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    7455  VmaSuballocationType suballocType,
    7456  VmaAllocation* pAllocation)
    7457 {
    7458  VMA_ASSERT(allocator && (image != VK_NULL_HANDLE) && pAllocationCreateInfo && pAllocation);
    7459 
    7460  VkMemoryRequirements vkMemReq = {};
    7461  bool requiresDedicatedAllocation = false;
    7462  bool prefersDedicatedAllocation = false;
    7463  allocator->GetImageMemoryRequirements(image, vkMemReq,
    7464  requiresDedicatedAllocation, prefersDedicatedAllocation);
    7465 
    7466  return allocator->AllocateMemory(
    7467  vkMemReq,
    7468  requiresDedicatedAllocation,
    7469  prefersDedicatedAllocation,
    7470  VK_NULL_HANDLE, // dedicatedBuffer
    7471  image, // dedicatedImage
    7472  *pAllocationCreateInfo,
    7473  suballocType,
    7474  pAllocation);
    7475 }
    7476 
    7478 // Public interface
    7479 
    7480 VkResult vmaCreateAllocator(
    7481  const VmaAllocatorCreateInfo* pCreateInfo,
    7482  VmaAllocator* pAllocator)
    7483 {
    7484  VMA_ASSERT(pCreateInfo && pAllocator);
    7485  VMA_DEBUG_LOG("vmaCreateAllocator");
    7486  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
    7487  return VK_SUCCESS;
    7488 }
    7489 
    7490 void vmaDestroyAllocator(
    7491  VmaAllocator allocator)
    7492 {
    7493  if(allocator != VK_NULL_HANDLE)
    7494  {
    7495  VMA_DEBUG_LOG("vmaDestroyAllocator");
    7496  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
    7497  vma_delete(&allocationCallbacks, allocator);
    7498  }
    7499 }
    7500 
    7502  VmaAllocator allocator,
    7503  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
    7504 {
    7505  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
    7506  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
    7507 }
    7508 
    7510  VmaAllocator allocator,
    7511  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
    7512 {
    7513  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
    7514  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
    7515 }
    7516 
    7518  VmaAllocator allocator,
    7519  uint32_t memoryTypeIndex,
    7520  VkMemoryPropertyFlags* pFlags)
    7521 {
    7522  VMA_ASSERT(allocator && pFlags);
    7523  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
    7524  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
    7525 }
    7526 
    7528  VmaAllocator allocator,
    7529  uint32_t frameIndex)
    7530 {
    7531  VMA_ASSERT(allocator);
    7532  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
    7533 
    7534  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7535 
    7536  allocator->SetCurrentFrameIndex(frameIndex);
    7537 }
    7538 
    7539 void vmaCalculateStats(
    7540  VmaAllocator allocator,
    7541  VmaStats* pStats)
    7542 {
    7543  VMA_ASSERT(allocator && pStats);
    7544  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7545  allocator->CalculateStats(pStats);
    7546 }
    7547 
    7548 #if VMA_STATS_STRING_ENABLED
    7549 
// Builds a null-terminated JSON string describing the current state of the
// allocator: total statistics, then per-heap and per-memory-type sections,
// and - when detailedMap is VK_TRUE - a detailed map of all allocations.
// The returned string is allocated via the allocator and must be released
// with vmaFreeStatsString().
void vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap)
{
    VMA_ASSERT(allocator && ppStatsString);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VmaStringBuilder sb(allocator);
    {
        // Inner scope: the JSON writer must be destroyed before the string
        // builder's contents are copied out below.
        VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
        json.BeginObject();

        VmaStats stats;
        allocator->CalculateStats(&stats);

        json.WriteString("Total");
        VmaPrintStatInfo(json, stats.total);

        // One object per memory heap, keyed "Heap <index>".
        for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
        {
            json.BeginString("Heap ");
            json.ContinueString(heapIndex);
            json.EndString();
            json.BeginObject();

            json.WriteString("Size");
            json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);

            json.WriteString("Flags");
            json.BeginArray(true);
            if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
            {
                json.WriteString("DEVICE_LOCAL");
            }
            json.EndArray();

            // Per-heap stats only when the heap actually contains blocks.
            if(stats.memoryHeap[heapIndex].blockCount > 0)
            {
                json.WriteString("Stats");
                VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
            }

            // Nested object for every memory type belonging to this heap.
            for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
            {
                if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
                {
                    json.BeginString("Type ");
                    json.ContinueString(typeIndex);
                    json.EndString();

                    json.BeginObject();

                    // Property flags of this memory type as an array of names.
                    json.WriteString("Flags");
                    json.BeginArray(true);
                    VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
                    if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
                    {
                        json.WriteString("DEVICE_LOCAL");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
                    {
                        json.WriteString("HOST_VISIBLE");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
                    {
                        json.WriteString("HOST_COHERENT");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
                    {
                        json.WriteString("HOST_CACHED");
                    }
                    if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
                    {
                        json.WriteString("LAZILY_ALLOCATED");
                    }
                    json.EndArray();

                    if(stats.memoryType[typeIndex].blockCount > 0)
                    {
                        json.WriteString("Stats");
                        VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
                    }

                    json.EndObject();
                }
            }

            json.EndObject();
        }
        if(detailedMap == VK_TRUE)
        {
            allocator->PrintDetailedMap(json);
        }

        json.EndObject();
    }

    // Copy the built text into a heap array owned by the caller
    // (released later by vmaFreeStatsString).
    const size_t len = sb.GetLength();
    char* const pChars = vma_new_array(allocator, char, len + 1);
    if(len > 0)
    {
        memcpy(pChars, sb.GetData(), len);
    }
    pChars[len] = '\0';
    *ppStatsString = pChars;
}
    7657 
    7658 void vmaFreeStatsString(
    7659  VmaAllocator allocator,
    7660  char* pStatsString)
    7661 {
    7662  if(pStatsString != VMA_NULL)
    7663  {
    7664  VMA_ASSERT(allocator);
    7665  size_t len = strlen(pStatsString);
    7666  vma_delete_array(allocator, pStatsString, len + 1);
    7667  }
    7668 }
    7669 
    7670 #endif // #if VMA_STATS_STRING_ENABLED
    7671 
    7674 VkResult vmaFindMemoryTypeIndex(
    7675  VmaAllocator allocator,
    7676  uint32_t memoryTypeBits,
    7677  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    7678  uint32_t* pMemoryTypeIndex)
    7679 {
    7680  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    7681  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    7682  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    7683 
    7684  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
    7685  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
    7686  if(preferredFlags == 0)
    7687  {
    7688  preferredFlags = requiredFlags;
    7689  }
    7690  // preferredFlags, if not 0, must be a superset of requiredFlags.
    7691  VMA_ASSERT((requiredFlags & ~preferredFlags) == 0);
    7692 
    7693  // Convert usage to requiredFlags and preferredFlags.
    7694  switch(pAllocationCreateInfo->usage)
    7695  {
    7697  break;
    7699  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    7700  break;
    7702  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    7703  break;
    7705  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    7706  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    7707  break;
    7709  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    7710  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
    7711  break;
    7712  default:
    7713  break;
    7714  }
    7715 
    7716  *pMemoryTypeIndex = UINT32_MAX;
    7717  uint32_t minCost = UINT32_MAX;
    7718  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
    7719  memTypeIndex < allocator->GetMemoryTypeCount();
    7720  ++memTypeIndex, memTypeBit <<= 1)
    7721  {
    7722  // This memory type is acceptable according to memoryTypeBits bitmask.
    7723  if((memTypeBit & memoryTypeBits) != 0)
    7724  {
    7725  const VkMemoryPropertyFlags currFlags =
    7726  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
    7727  // This memory type contains requiredFlags.
    7728  if((requiredFlags & ~currFlags) == 0)
    7729  {
    7730  // Calculate cost as number of bits from preferredFlags not present in this memory type.
    7731  uint32_t currCost = CountBitsSet(preferredFlags & ~currFlags);
    7732  // Remember memory type with lowest cost.
    7733  if(currCost < minCost)
    7734  {
    7735  *pMemoryTypeIndex = memTypeIndex;
    7736  if(currCost == 0)
    7737  {
    7738  return VK_SUCCESS;
    7739  }
    7740  minCost = currCost;
    7741  }
    7742  }
    7743  }
    7744  }
    7745  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
    7746 }
    7747 
    7748 VkResult vmaCreatePool(
    7749  VmaAllocator allocator,
    7750  const VmaPoolCreateInfo* pCreateInfo,
    7751  VmaPool* pPool)
    7752 {
    7753  VMA_ASSERT(allocator && pCreateInfo && pPool);
    7754 
    7755  VMA_DEBUG_LOG("vmaCreatePool");
    7756 
    7757  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7758 
    7759  return allocator->CreatePool(pCreateInfo, pPool);
    7760 }
    7761 
    7762 void vmaDestroyPool(
    7763  VmaAllocator allocator,
    7764  VmaPool pool)
    7765 {
    7766  VMA_ASSERT(allocator && pool);
    7767 
    7768  VMA_DEBUG_LOG("vmaDestroyPool");
    7769 
    7770  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7771 
    7772  allocator->DestroyPool(pool);
    7773 }
    7774 
    7775 void vmaGetPoolStats(
    7776  VmaAllocator allocator,
    7777  VmaPool pool,
    7778  VmaPoolStats* pPoolStats)
    7779 {
    7780  VMA_ASSERT(allocator && pool && pPoolStats);
    7781 
    7782  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7783 
    7784  allocator->GetPoolStats(pool, pPoolStats);
    7785 }
    7786 
    7788  VmaAllocator allocator,
    7789  VmaPool pool,
    7790  size_t* pLostAllocationCount)
    7791 {
    7792  VMA_ASSERT(allocator && pool);
    7793 
    7794  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7795 
    7796  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
    7797 }
    7798 
    7799 VkResult vmaAllocateMemory(
    7800  VmaAllocator allocator,
    7801  const VkMemoryRequirements* pVkMemoryRequirements,
    7802  const VmaAllocationCreateInfo* pCreateInfo,
    7803  VmaAllocation* pAllocation,
    7804  VmaAllocationInfo* pAllocationInfo)
    7805 {
    7806  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
    7807 
    7808  VMA_DEBUG_LOG("vmaAllocateMemory");
    7809 
    7810  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7811 
    7812  VkResult result = allocator->AllocateMemory(
    7813  *pVkMemoryRequirements,
    7814  false, // requiresDedicatedAllocation
    7815  false, // prefersDedicatedAllocation
    7816  VK_NULL_HANDLE, // dedicatedBuffer
    7817  VK_NULL_HANDLE, // dedicatedImage
    7818  *pCreateInfo,
    7819  VMA_SUBALLOCATION_TYPE_UNKNOWN,
    7820  pAllocation);
    7821 
    7822  if(pAllocationInfo && result == VK_SUCCESS)
    7823  {
    7824  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    7825  }
    7826 
    7827  return result;
    7828 }
    7829 
    7831  VmaAllocator allocator,
    7832  VkBuffer buffer,
    7833  const VmaAllocationCreateInfo* pCreateInfo,
    7834  VmaAllocation* pAllocation,
    7835  VmaAllocationInfo* pAllocationInfo)
    7836 {
    7837  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    7838 
    7839  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
    7840 
    7841  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7842 
    7843  VkMemoryRequirements vkMemReq = {};
    7844  bool requiresDedicatedAllocation = false;
    7845  bool prefersDedicatedAllocation = false;
    7846  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
    7847  requiresDedicatedAllocation,
    7848  prefersDedicatedAllocation);
    7849 
    7850  VkResult result = allocator->AllocateMemory(
    7851  vkMemReq,
    7852  requiresDedicatedAllocation,
    7853  prefersDedicatedAllocation,
    7854  buffer, // dedicatedBuffer
    7855  VK_NULL_HANDLE, // dedicatedImage
    7856  *pCreateInfo,
    7857  VMA_SUBALLOCATION_TYPE_BUFFER,
    7858  pAllocation);
    7859 
    7860  if(pAllocationInfo && result == VK_SUCCESS)
    7861  {
    7862  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    7863  }
    7864 
    7865  return result;
    7866 }
    7867 
    7868 VkResult vmaAllocateMemoryForImage(
    7869  VmaAllocator allocator,
    7870  VkImage image,
    7871  const VmaAllocationCreateInfo* pCreateInfo,
    7872  VmaAllocation* pAllocation,
    7873  VmaAllocationInfo* pAllocationInfo)
    7874 {
    7875  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    7876 
    7877  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
    7878 
    7879  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7880 
    7881  VkResult result = AllocateMemoryForImage(
    7882  allocator,
    7883  image,
    7884  pCreateInfo,
    7885  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
    7886  pAllocation);
    7887 
    7888  if(pAllocationInfo && result == VK_SUCCESS)
    7889  {
    7890  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    7891  }
    7892 
    7893  return result;
    7894 }
    7895 
    7896 void vmaFreeMemory(
    7897  VmaAllocator allocator,
    7898  VmaAllocation allocation)
    7899 {
    7900  VMA_ASSERT(allocator && allocation);
    7901 
    7902  VMA_DEBUG_LOG("vmaFreeMemory");
    7903 
    7904  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7905 
    7906  allocator->FreeMemory(allocation);
    7907 }
    7908 
    7910  VmaAllocator allocator,
    7911  VmaAllocation allocation,
    7912  VmaAllocationInfo* pAllocationInfo)
    7913 {
    7914  VMA_ASSERT(allocator && allocation && pAllocationInfo);
    7915 
    7916  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7917 
    7918  allocator->GetAllocationInfo(allocation, pAllocationInfo);
    7919 }
    7920 
    7922  VmaAllocator allocator,
    7923  VmaAllocation allocation,
    7924  void* pUserData)
    7925 {
    7926  VMA_ASSERT(allocator && allocation);
    7927 
    7928  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7929 
    7930  allocation->SetUserData(pUserData);
    7931 }
    7932 
    7934  VmaAllocator allocator,
    7935  VmaAllocation* pAllocation)
    7936 {
    7937  VMA_ASSERT(allocator && pAllocation);
    7938 
    7939  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
    7940 
    7941  allocator->CreateLostAllocation(pAllocation);
    7942 }
    7943 
    7944 VkResult vmaMapMemory(
    7945  VmaAllocator allocator,
    7946  VmaAllocation allocation,
    7947  void** ppData)
    7948 {
    7949  VMA_ASSERT(allocator && allocation && ppData);
    7950 
    7951  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7952 
    7953  return (*allocator->GetVulkanFunctions().vkMapMemory)(
    7954  allocator->m_hDevice,
    7955  allocation->GetMemory(),
    7956  allocation->GetOffset(),
    7957  allocation->GetSize(),
    7958  0,
    7959  ppData);
    7960 }
    7961 
    7962 void vmaUnmapMemory(
    7963  VmaAllocator allocator,
    7964  VmaAllocation allocation)
    7965 {
    7966  VMA_ASSERT(allocator && allocation);
    7967 
    7968  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7969 
    7970  (*allocator->GetVulkanFunctions().vkUnmapMemory)(allocator->m_hDevice, allocation->GetMemory());
    7971 }
    7972 
    7973 void vmaUnmapPersistentlyMappedMemory(VmaAllocator allocator)
    7974 {
    7975  VMA_ASSERT(allocator);
    7976 
    7977  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7978 
    7979  allocator->UnmapPersistentlyMappedMemory();
    7980 }
    7981 
    7982 VkResult vmaMapPersistentlyMappedMemory(VmaAllocator allocator)
    7983 {
    7984  VMA_ASSERT(allocator);
    7985 
    7986  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7987 
    7988  return allocator->MapPersistentlyMappedMemory();
    7989 }
    7990 
    7991 VkResult vmaDefragment(
    7992  VmaAllocator allocator,
    7993  VmaAllocation* pAllocations,
    7994  size_t allocationCount,
    7995  VkBool32* pAllocationsChanged,
    7996  const VmaDefragmentationInfo *pDefragmentationInfo,
    7997  VmaDefragmentationStats* pDefragmentationStats)
    7998 {
    7999  VMA_ASSERT(allocator && pAllocations);
    8000 
    8001  VMA_DEBUG_LOG("vmaDefragment");
    8002 
    8003  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8004 
    8005  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
    8006 }
    8007 
    8008 VkResult vmaCreateBuffer(
    8009  VmaAllocator allocator,
    8010  const VkBufferCreateInfo* pBufferCreateInfo,
    8011  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    8012  VkBuffer* pBuffer,
    8013  VmaAllocation* pAllocation,
    8014  VmaAllocationInfo* pAllocationInfo)
    8015 {
    8016  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
    8017 
    8018  VMA_DEBUG_LOG("vmaCreateBuffer");
    8019 
    8020  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8021 
    8022  *pBuffer = VK_NULL_HANDLE;
    8023  *pAllocation = VK_NULL_HANDLE;
    8024 
    8025  // 1. Create VkBuffer.
    8026  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
    8027  allocator->m_hDevice,
    8028  pBufferCreateInfo,
    8029  allocator->GetAllocationCallbacks(),
    8030  pBuffer);
    8031  if(res >= 0)
    8032  {
    8033  // 2. vkGetBufferMemoryRequirements.
    8034  VkMemoryRequirements vkMemReq = {};
    8035  bool requiresDedicatedAllocation = false;
    8036  bool prefersDedicatedAllocation = false;
    8037  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
    8038  requiresDedicatedAllocation, prefersDedicatedAllocation);
    8039 
    8040  // 3. Allocate memory using allocator.
    8041  res = allocator->AllocateMemory(
    8042  vkMemReq,
    8043  requiresDedicatedAllocation,
    8044  prefersDedicatedAllocation,
    8045  *pBuffer, // dedicatedBuffer
    8046  VK_NULL_HANDLE, // dedicatedImage
    8047  *pAllocationCreateInfo,
    8048  VMA_SUBALLOCATION_TYPE_BUFFER,
    8049  pAllocation);
    8050  if(res >= 0)
    8051  {
    8052  // 3. Bind buffer with memory.
    8053  res = (*allocator->GetVulkanFunctions().vkBindBufferMemory)(
    8054  allocator->m_hDevice,
    8055  *pBuffer,
    8056  (*pAllocation)->GetMemory(),
    8057  (*pAllocation)->GetOffset());
    8058  if(res >= 0)
    8059  {
    8060  // All steps succeeded.
    8061  if(pAllocationInfo != VMA_NULL)
    8062  {
    8063  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    8064  }
    8065  return VK_SUCCESS;
    8066  }
    8067  allocator->FreeMemory(*pAllocation);
    8068  *pAllocation = VK_NULL_HANDLE;
    8069  return res;
    8070  }
    8071  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    8072  *pBuffer = VK_NULL_HANDLE;
    8073  return res;
    8074  }
    8075  return res;
    8076 }
    8077 
    8078 void vmaDestroyBuffer(
    8079  VmaAllocator allocator,
    8080  VkBuffer buffer,
    8081  VmaAllocation allocation)
    8082 {
    8083  if(buffer != VK_NULL_HANDLE)
    8084  {
    8085  VMA_ASSERT(allocator);
    8086 
    8087  VMA_DEBUG_LOG("vmaDestroyBuffer");
    8088 
    8089  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8090 
    8091  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
    8092 
    8093  allocator->FreeMemory(allocation);
    8094  }
    8095 }
    8096 
    8097 VkResult vmaCreateImage(
    8098  VmaAllocator allocator,
    8099  const VkImageCreateInfo* pImageCreateInfo,
    8100  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    8101  VkImage* pImage,
    8102  VmaAllocation* pAllocation,
    8103  VmaAllocationInfo* pAllocationInfo)
    8104 {
    8105  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
    8106 
    8107  VMA_DEBUG_LOG("vmaCreateImage");
    8108 
    8109  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8110 
    8111  *pImage = VK_NULL_HANDLE;
    8112  *pAllocation = VK_NULL_HANDLE;
    8113 
    8114  // 1. Create VkImage.
    8115  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
    8116  allocator->m_hDevice,
    8117  pImageCreateInfo,
    8118  allocator->GetAllocationCallbacks(),
    8119  pImage);
    8120  if(res >= 0)
    8121  {
    8122  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
    8123  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
    8124  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
    8125 
    8126  // 2. Allocate memory using allocator.
    8127  res = AllocateMemoryForImage(allocator, *pImage, pAllocationCreateInfo, suballocType, pAllocation);
    8128  if(res >= 0)
    8129  {
    8130  // 3. Bind image with memory.
    8131  res = (*allocator->GetVulkanFunctions().vkBindImageMemory)(
    8132  allocator->m_hDevice,
    8133  *pImage,
    8134  (*pAllocation)->GetMemory(),
    8135  (*pAllocation)->GetOffset());
    8136  if(res >= 0)
    8137  {
    8138  // All steps succeeded.
    8139  if(pAllocationInfo != VMA_NULL)
    8140  {
    8141  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    8142  }
    8143  return VK_SUCCESS;
    8144  }
    8145  allocator->FreeMemory(*pAllocation);
    8146  *pAllocation = VK_NULL_HANDLE;
    8147  return res;
    8148  }
    8149  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    8150  *pImage = VK_NULL_HANDLE;
    8151  return res;
    8152  }
    8153  return res;
    8154 }
    8155 
    8156 void vmaDestroyImage(
    8157  VmaAllocator allocator,
    8158  VkImage image,
    8159  VmaAllocation allocation)
    8160 {
    8161  if(image != VK_NULL_HANDLE)
    8162  {
    8163  VMA_ASSERT(allocator);
    8164 
    8165  VMA_DEBUG_LOG("vmaDestroyImage");
    8166 
    8167  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8168 
    8169  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
    8170 
    8171  allocator->FreeMemory(allocation);
    8172  }
    8173 }
    8174 
    8175 #endif // #ifdef VMA_IMPLEMENTATION
    PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
    Definition: vk_mem_alloc.h:568
    +Go to the documentation of this file.
    1 //
    2 // Copyright (c) 2017 Advanced Micro Devices, Inc. All rights reserved.
    3 //
    4 // Permission is hereby granted, free of charge, to any person obtaining a copy
    5 // of this software and associated documentation files (the "Software"), to deal
    6 // in the Software without restriction, including without limitation the rights
    7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    8 // copies of the Software, and to permit persons to whom the Software is
    9 // furnished to do so, subject to the following conditions:
    10 //
    11 // The above copyright notice and this permission notice shall be included in
    12 // all copies or substantial portions of the Software.
    13 //
    14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    20 // THE SOFTWARE.
    21 //
    22 
    23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
    24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
    25 
    26 #ifdef __cplusplus
    27 extern "C" {
    28 #endif
    29 
    492 #include <vulkan/vulkan.h>
    493 
    494 VK_DEFINE_HANDLE(VmaAllocator)
    495 
    496 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    498  VmaAllocator allocator,
    499  uint32_t memoryType,
    500  VkDeviceMemory memory,
    501  VkDeviceSize size);
    503 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    504  VmaAllocator allocator,
    505  uint32_t memoryType,
    506  VkDeviceMemory memory,
    507  VkDeviceSize size);
    508 
    516 typedef struct VmaDeviceMemoryCallbacks {
    522 
    558 
    561 typedef VkFlags VmaAllocatorCreateFlags;
    562 
    567 typedef struct VmaVulkanFunctions {
    568  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    569  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    570  PFN_vkAllocateMemory vkAllocateMemory;
    571  PFN_vkFreeMemory vkFreeMemory;
    572  PFN_vkMapMemory vkMapMemory;
    573  PFN_vkUnmapMemory vkUnmapMemory;
    574  PFN_vkBindBufferMemory vkBindBufferMemory;
    575  PFN_vkBindImageMemory vkBindImageMemory;
    576  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    577  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    578  PFN_vkCreateBuffer vkCreateBuffer;
    579  PFN_vkDestroyBuffer vkDestroyBuffer;
    580  PFN_vkCreateImage vkCreateImage;
    581  PFN_vkDestroyImage vkDestroyImage;
    582  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    583  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
    585 
    588 {
    590  VmaAllocatorCreateFlags flags;
    592 
    593  VkPhysicalDevice physicalDevice;
    595 
    596  VkDevice device;
    598 
    601 
    604 
    605  const VkAllocationCallbacks* pAllocationCallbacks;
    607 
    622  uint32_t frameInUseCount;
    640  const VkDeviceSize* pHeapSizeLimit;
    654 
    656 VkResult vmaCreateAllocator(
    657  const VmaAllocatorCreateInfo* pCreateInfo,
    658  VmaAllocator* pAllocator);
    659 
    662  VmaAllocator allocator);
    663 
    669  VmaAllocator allocator,
    670  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
    671 
    677  VmaAllocator allocator,
    678  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
    679 
    687  VmaAllocator allocator,
    688  uint32_t memoryTypeIndex,
    689  VkMemoryPropertyFlags* pFlags);
    690 
    700  VmaAllocator allocator,
    701  uint32_t frameIndex);
    702 
    705 typedef struct VmaStatInfo
    706 {
    708  uint32_t blockCount;
    710  uint32_t allocationCount;
    714  VkDeviceSize usedBytes;
    716  VkDeviceSize unusedBytes;
    717  VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
    718  VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
    719 } VmaStatInfo;
    720 
    722 typedef struct VmaStats
    723 {
    724  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    725  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    727 } VmaStats;
    728 
    730 void vmaCalculateStats(
    731  VmaAllocator allocator,
    732  VmaStats* pStats);
    733 
    734 #define VMA_STATS_STRING_ENABLED 1
    735 
    736 #if VMA_STATS_STRING_ENABLED
    737 
    739 
    742  VmaAllocator allocator,
    743  char** ppStatsString,
    744  VkBool32 detailedMap);
    745 
    746 void vmaFreeStatsString(
    747  VmaAllocator allocator,
    748  char* pStatsString);
    749 
    750 #endif // #if VMA_STATS_STRING_ENABLED
    751 
    752 VK_DEFINE_HANDLE(VmaPool)
    753 
    754 typedef enum VmaMemoryUsage
    755 {
    761 
    764 
    767 
    771 
    786 
    831 
    834 typedef VkFlags VmaAllocationCreateFlags;
    835 
    837 {
    839  VmaAllocationCreateFlags flags;
    850  VkMemoryPropertyFlags requiredFlags;
    856  VkMemoryPropertyFlags preferredFlags;
    858  void* pUserData;
    863  VmaPool pool;
    865 
    880 VkResult vmaFindMemoryTypeIndex(
    881  VmaAllocator allocator,
    882  uint32_t memoryTypeBits,
    883  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    884  uint32_t* pMemoryTypeIndex);
    885 
    887 typedef enum VmaPoolCreateFlagBits {
    915 
    918 typedef VkFlags VmaPoolCreateFlags;
    919 
    922 typedef struct VmaPoolCreateInfo {
    925  uint32_t memoryTypeIndex;
    928  VmaPoolCreateFlags flags;
    933  VkDeviceSize blockSize;
    960  uint32_t frameInUseCount;
    962 
/** Statistics of a single VmaPool, retrieved by vmaGetPoolStats().
NOTE(review): some members may have been lost in this comment-stripped
extraction — compare with the full header. */
typedef struct VmaPoolStats {
    // Total size of the pool, in bytes.
    VkDeviceSize size;
    // Bytes in the pool not occupied by any allocation.
    VkDeviceSize unusedSize;
    // Size of the largest continuous free range in the pool.
    VkDeviceSize unusedRangeSizeMax;
} VmaPoolStats;
    986 
    993 VkResult vmaCreatePool(
    994  VmaAllocator allocator,
    995  const VmaPoolCreateInfo* pCreateInfo,
    996  VmaPool* pPool);
    997 
    1000 void vmaDestroyPool(
    1001  VmaAllocator allocator,
    1002  VmaPool pool);
    1003 
    1010 void vmaGetPoolStats(
    1011  VmaAllocator allocator,
    1012  VmaPool pool,
    1013  VmaPoolStats* pPoolStats);
    1014 
    1022  VmaAllocator allocator,
    1023  VmaPool pool,
    1024  size_t* pLostAllocationCount);
    1025 
    1026 VK_DEFINE_HANDLE(VmaAllocation)
    1027 
    1028 
    1030 typedef struct VmaAllocationInfo {
    1035  uint32_t memoryType;
    1044  VkDeviceMemory deviceMemory;
    1049  VkDeviceSize offset;
    1054  VkDeviceSize size;
    1065  void* pUserData;
    1067 
    1078 VkResult vmaAllocateMemory(
    1079  VmaAllocator allocator,
    1080  const VkMemoryRequirements* pVkMemoryRequirements,
    1081  const VmaAllocationCreateInfo* pCreateInfo,
    1082  VmaAllocation* pAllocation,
    1083  VmaAllocationInfo* pAllocationInfo);
    1084 
    1092  VmaAllocator allocator,
    1093  VkBuffer buffer,
    1094  const VmaAllocationCreateInfo* pCreateInfo,
    1095  VmaAllocation* pAllocation,
    1096  VmaAllocationInfo* pAllocationInfo);
    1097 
    1099 VkResult vmaAllocateMemoryForImage(
    1100  VmaAllocator allocator,
    1101  VkImage image,
    1102  const VmaAllocationCreateInfo* pCreateInfo,
    1103  VmaAllocation* pAllocation,
    1104  VmaAllocationInfo* pAllocationInfo);
    1105 
    1107 void vmaFreeMemory(
    1108  VmaAllocator allocator,
    1109  VmaAllocation allocation);
    1110 
    1113  VmaAllocator allocator,
    1114  VmaAllocation allocation,
    1115  VmaAllocationInfo* pAllocationInfo);
    1116 
    1119  VmaAllocator allocator,
    1120  VmaAllocation allocation,
    1121  void* pUserData);
    1122 
    1134  VmaAllocator allocator,
    1135  VmaAllocation* pAllocation);
    1136 
    1145 VkResult vmaMapMemory(
    1146  VmaAllocator allocator,
    1147  VmaAllocation allocation,
    1148  void** ppData);
    1149 
    1150 void vmaUnmapMemory(
    1151  VmaAllocator allocator,
    1152  VmaAllocation allocation);
    1153 
    1176 void vmaUnmapPersistentlyMappedMemory(VmaAllocator allocator);
    1177 
    1185 VkResult vmaMapPersistentlyMappedMemory(VmaAllocator allocator);
    1186 
    1188 typedef struct VmaDefragmentationInfo {
    1193  VkDeviceSize maxBytesToMove;
    1200 
    1202 typedef struct VmaDefragmentationStats {
    1204  VkDeviceSize bytesMoved;
    1206  VkDeviceSize bytesFreed;
    1212 
    1284 VkResult vmaDefragment(
    1285  VmaAllocator allocator,
    1286  VmaAllocation* pAllocations,
    1287  size_t allocationCount,
    1288  VkBool32* pAllocationsChanged,
    1289  const VmaDefragmentationInfo *pDefragmentationInfo,
    1290  VmaDefragmentationStats* pDefragmentationStats);
    1291 
    1318 VkResult vmaCreateBuffer(
    1319  VmaAllocator allocator,
    1320  const VkBufferCreateInfo* pBufferCreateInfo,
    1321  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    1322  VkBuffer* pBuffer,
    1323  VmaAllocation* pAllocation,
    1324  VmaAllocationInfo* pAllocationInfo);
    1325 
    1337 void vmaDestroyBuffer(
    1338  VmaAllocator allocator,
    1339  VkBuffer buffer,
    1340  VmaAllocation allocation);
    1341 
    1343 VkResult vmaCreateImage(
    1344  VmaAllocator allocator,
    1345  const VkImageCreateInfo* pImageCreateInfo,
    1346  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    1347  VkImage* pImage,
    1348  VmaAllocation* pAllocation,
    1349  VmaAllocationInfo* pAllocationInfo);
    1350 
    1362 void vmaDestroyImage(
    1363  VmaAllocator allocator,
    1364  VkImage image,
    1365  VmaAllocation allocation);
    1366 
    1367 #ifdef __cplusplus
    1368 }
    1369 #endif
    1370 
    1371 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
    1372 
    1373 // For Visual Studio IntelliSense.
    1374 #ifdef __INTELLISENSE__
    1375 #define VMA_IMPLEMENTATION
    1376 #endif
    1377 
    1378 #ifdef VMA_IMPLEMENTATION
    1379 #undef VMA_IMPLEMENTATION
    1380 
    1381 #include <cstdint>
    1382 #include <cstdlib>
    1383 #include <cstring>
    1384 
    1385 /*******************************************************************************
    1386 CONFIGURATION SECTION
    1387 
    1388 Define some of these macros before each #include of this header or change them
    1389 here if you need other then default behavior depending on your environment.
    1390 */
    1391 
    1392 /*
    1393 Define this macro to 1 to make the library fetch pointers to Vulkan functions
    1394 internally, like:
    1395 
    1396  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    1397 
    1398 Define to 0 if you are going to provide you own pointers to Vulkan functions via
    1399 VmaAllocatorCreateInfo::pVulkanFunctions.
    1400 */
    1401 #ifndef VMA_STATIC_VULKAN_FUNCTIONS
    1402 #define VMA_STATIC_VULKAN_FUNCTIONS 1
    1403 #endif
    1404 
    1405 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
    1406 //#define VMA_USE_STL_CONTAINERS 1
    1407 
    1408 /* Set this macro to 1 to make the library including and using STL containers:
    1409 std::pair, std::vector, std::list, std::unordered_map.
    1410 
    1411 Set it to 0 or undefined to make the library using its own implementation of
    1412 the containers.
    1413 */
    1414 #if VMA_USE_STL_CONTAINERS
    1415  #define VMA_USE_STL_VECTOR 1
    1416  #define VMA_USE_STL_UNORDERED_MAP 1
    1417  #define VMA_USE_STL_LIST 1
    1418 #endif
    1419 
    1420 #if VMA_USE_STL_VECTOR
    1421  #include <vector>
    1422 #endif
    1423 
    1424 #if VMA_USE_STL_UNORDERED_MAP
    1425  #include <unordered_map>
    1426 #endif
    1427 
    1428 #if VMA_USE_STL_LIST
    1429  #include <list>
    1430 #endif
    1431 
    1432 /*
    1433 Following headers are used in this CONFIGURATION section only, so feel free to
    1434 remove them if not needed.
    1435 */
    1436 #include <cassert> // for assert
    1437 #include <algorithm> // for min, max
    1438 #include <mutex> // for std::mutex
    1439 #include <atomic> // for std::atomic
    1440 
    1441 #if !defined(_WIN32)
    1442  #include <malloc.h> // for aligned_alloc()
    1443 #endif
    1444 
    1445 // Normal assert to check for programmer's errors, especially in Debug configuration.
    1446 #ifndef VMA_ASSERT
    1447  #ifdef _DEBUG
    1448  #define VMA_ASSERT(expr) assert(expr)
    1449  #else
    1450  #define VMA_ASSERT(expr)
    1451  #endif
    1452 #endif
    1453 
    1454 // Assert that will be called very often, like inside data structures e.g. operator[].
    1455 // Making it non-empty can make program slow.
    1456 #ifndef VMA_HEAVY_ASSERT
    1457  #ifdef _DEBUG
    1458  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    1459  #else
    1460  #define VMA_HEAVY_ASSERT(expr)
    1461  #endif
    1462 #endif
    1463 
    1464 #ifndef VMA_NULL
    1465  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    1466  #define VMA_NULL nullptr
    1467 #endif
    1468 
    1469 #ifndef VMA_ALIGN_OF
    1470  #define VMA_ALIGN_OF(type) (__alignof(type))
    1471 #endif
    1472 
    1473 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
    1474  #if defined(_WIN32)
    1475  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    1476  #else
    1477  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
    1478  #endif
    1479 #endif
    1480 
    1481 #ifndef VMA_SYSTEM_FREE
    1482  #if defined(_WIN32)
    1483  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    1484  #else
    1485  #define VMA_SYSTEM_FREE(ptr) free(ptr)
    1486  #endif
    1487 #endif
    1488 
    1489 #ifndef VMA_MIN
    1490  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
    1491 #endif
    1492 
    1493 #ifndef VMA_MAX
    1494  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
    1495 #endif
    1496 
    1497 #ifndef VMA_SWAP
    1498  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
    1499 #endif
    1500 
    1501 #ifndef VMA_SORT
    1502  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
    1503 #endif
    1504 
    1505 #ifndef VMA_DEBUG_LOG
    1506  #define VMA_DEBUG_LOG(format, ...)
    1507  /*
    1508  #define VMA_DEBUG_LOG(format, ...) do { \
    1509  printf(format, __VA_ARGS__); \
    1510  printf("\n"); \
    1511  } while(false)
    1512  */
    1513 #endif
    1514 
// Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
#if VMA_STATS_STRING_ENABLED
    // Formats a 32-bit unsigned integer as decimal text into outStr
    // (buffer capacity strLen, always NUL-terminated by snprintf).
    static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    {
        snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    }
    // Formats a 64-bit unsigned integer as decimal text into outStr.
    static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    {
        snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    }
    // Formats a pointer value (implementation-defined "%p" form) into outStr.
    static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    {
        snprintf(outStr, strLen, "%p", ptr);
    }
#endif
    1530 
#ifndef VMA_MUTEX
    // Default mutex implementation: a thin wrapper over std::mutex.
    // Override by defining VMA_MUTEX (to a class with Lock()/Unlock())
    // before including this file.
    class VmaMutex
    {
    public:
        VmaMutex() { }
        ~VmaMutex() { }
        void Lock() { m_Mutex.lock(); }
        void Unlock() { m_Mutex.unlock(); }
    private:
        std::mutex m_Mutex;
    };
    #define VMA_MUTEX VmaMutex
#endif
    1544 
    1545 /*
    1546 If providing your own implementation, you need to implement a subset of std::atomic:
    1547 
    1548 - Constructor(uint32_t desired)
    1549 - uint32_t load() const
    1550 - void store(uint32_t desired)
    1551 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
    1552 */
    1553 #ifndef VMA_ATOMIC_UINT32
    1554  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
    1555 #endif
    1556 
    1557 #ifndef VMA_BEST_FIT
    1558 
    1570  #define VMA_BEST_FIT (1)
    1571 #endif
    1572 
    1573 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    1574 
    1578  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
    1579 #endif
    1580 
    1581 #ifndef VMA_DEBUG_ALIGNMENT
    1582 
    1586  #define VMA_DEBUG_ALIGNMENT (1)
    1587 #endif
    1588 
    1589 #ifndef VMA_DEBUG_MARGIN
    1590 
    1594  #define VMA_DEBUG_MARGIN (0)
    1595 #endif
    1596 
    1597 #ifndef VMA_DEBUG_GLOBAL_MUTEX
    1598 
    1602  #define VMA_DEBUG_GLOBAL_MUTEX (0)
    1603 #endif
    1604 
    1605 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    1606 
    1610  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
    1611 #endif
    1612 
    1613 #ifndef VMA_SMALL_HEAP_MAX_SIZE
    1614  #define VMA_SMALL_HEAP_MAX_SIZE (512 * 1024 * 1024)
    1616 #endif
    1617 
    1618 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    1619  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256 * 1024 * 1024)
    1621 #endif
    1622 
    1623 #ifndef VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE
    1624  #define VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE (64 * 1024 * 1024)
    1626 #endif
    1627 
    1628 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
    1629 
    1630 /*******************************************************************************
    1631 END OF CONFIGURATION
    1632 */
    1633 
    1634 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    1635  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
    1636 
    1637 // Returns number of bits set to 1 in (v).
    1638 static inline uint32_t CountBitsSet(uint32_t v)
    1639 {
    1640  uint32_t c = v - ((v >> 1) & 0x55555555);
    1641  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    1642  c = ((c >> 4) + c) & 0x0F0F0F0F;
    1643  c = ((c >> 8) + c) & 0x00FF00FF;
    1644  c = ((c >> 16) + c) & 0x0000FFFF;
    1645  return c;
    1646 }
    1647 
    1648 // Aligns given value up to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 16.
    1649 // Use types like uint32_t, uint64_t as T.
    1650 template <typename T>
    1651 static inline T VmaAlignUp(T val, T align)
    1652 {
    1653  return (val + align - 1) / align * align;
    1654 }
    1655 
    1656 // Division with mathematical rounding to nearest number.
    1657 template <typename T>
    1658 inline T VmaRoundDiv(T x, T y)
    1659 {
    1660  return (x + (y / (T)2)) / y;
    1661 }
    1662 
    1663 #ifndef VMA_SORT
    1664 
    1665 template<typename Iterator, typename Compare>
    1666 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
    1667 {
    1668  Iterator centerValue = end; --centerValue;
    1669  Iterator insertIndex = beg;
    1670  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    1671  {
    1672  if(cmp(*memTypeIndex, *centerValue))
    1673  {
    1674  if(insertIndex != memTypeIndex)
    1675  {
    1676  VMA_SWAP(*memTypeIndex, *insertIndex);
    1677  }
    1678  ++insertIndex;
    1679  }
    1680  }
    1681  if(insertIndex != centerValue)
    1682  {
    1683  VMA_SWAP(*insertIndex, *centerValue);
    1684  }
    1685  return insertIndex;
    1686 }
    1687 
    1688 template<typename Iterator, typename Compare>
    1689 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
    1690 {
    1691  if(beg < end)
    1692  {
    1693  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
    1694  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
    1695  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    1696  }
    1697 }
    1698 
    1699 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
    1700 
    1701 #endif // #ifndef VMA_SORT
    1702 
    1703 /*
    1704 Returns true if two memory blocks occupy overlapping pages.
    1705 ResourceA must be in less memory offset than ResourceB.
    1706 
    1707 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
    1708 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
    1709 */
    1710 static inline bool VmaBlocksOnSamePage(
    1711  VkDeviceSize resourceAOffset,
    1712  VkDeviceSize resourceASize,
    1713  VkDeviceSize resourceBOffset,
    1714  VkDeviceSize pageSize)
    1715 {
    1716  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    1717  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    1718  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    1719  VkDeviceSize resourceBStart = resourceBOffset;
    1720  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    1721  return resourceAEndPage == resourceBStartPage;
    1722 }
    1723 
// Type of resource a suballocation holds. Used by
// VmaIsBufferImageGranularityConflict() to decide whether two neighboring
// suballocations must respect VkPhysicalDeviceLimits::bufferImageGranularity.
enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0,          // Unused range.
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,       // Resource type unknown: treated conservatively.
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3, // Image with unknown tiling.
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};
    1734 
    1735 /*
    1736 Returns true if given suballocation types could conflict and must respect
    1737 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
    1738 or linear image and another one is optimal image. If type is unknown, behave
    1739 conservatively.
    1740 */
    1741 static inline bool VmaIsBufferImageGranularityConflict(
    1742  VmaSuballocationType suballocType1,
    1743  VmaSuballocationType suballocType2)
    1744 {
    1745  if(suballocType1 > suballocType2)
    1746  {
    1747  VMA_SWAP(suballocType1, suballocType2);
    1748  }
    1749 
    1750  switch(suballocType1)
    1751  {
    1752  case VMA_SUBALLOCATION_TYPE_FREE:
    1753  return false;
    1754  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
    1755  return true;
    1756  case VMA_SUBALLOCATION_TYPE_BUFFER:
    1757  return
    1758  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    1759  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    1760  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
    1761  return
    1762  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    1763  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
    1764  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    1765  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
    1766  return
    1767  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    1768  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
    1769  return false;
    1770  default:
    1771  VMA_ASSERT(0);
    1772  return true;
    1773  }
    1774 }
    1775 
    1776 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
    1777 struct VmaMutexLock
    1778 {
    1779 public:
    1780  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
    1781  m_pMutex(useMutex ? &mutex : VMA_NULL)
    1782  {
    1783  if(m_pMutex)
    1784  {
    1785  m_pMutex->Lock();
    1786  }
    1787  }
    1788 
    1789  ~VmaMutexLock()
    1790  {
    1791  if(m_pMutex)
    1792  {
    1793  m_pMutex->Unlock();
    1794  }
    1795  }
    1796 
    1797 private:
    1798  VMA_MUTEX* m_pMutex;
    1799 };
    1800 
    1801 #if VMA_DEBUG_GLOBAL_MUTEX
    1802  static VMA_MUTEX gDebugGlobalMutex;
    1803  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
    1804 #else
    1805  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
    1806 #endif
    1807 
    1808 // Minimum size of a free suballocation to register it in the free suballocation collection.
    1809 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
    1810 
    1811 /*
    1812 Performs binary search and returns iterator to first element that is greater or
    1813 equal to (key), according to comparison (cmp).
    1814 
    1815 Cmp should return true if first argument is less than second argument.
    1816 
    1817 Returned value is the found element, if present in the collection or place where
    1818 new element with value (key) should be inserted.
    1819 */
    1820 template <typename IterT, typename KeyT, typename CmpT>
    1821 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpT cmp)
    1822 {
    1823  size_t down = 0, up = (end - beg);
    1824  while(down < up)
    1825  {
    1826  const size_t mid = (down + up) / 2;
    1827  if(cmp(*(beg+mid), key))
    1828  {
    1829  down = mid + 1;
    1830  }
    1831  else
    1832  {
    1833  up = mid;
    1834  }
    1835  }
    1836  return beg + down;
    1837 }
    1838 
    1840 // Memory allocation
    1841 
    1842 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
    1843 {
    1844  if((pAllocationCallbacks != VMA_NULL) &&
    1845  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    1846  {
    1847  return (*pAllocationCallbacks->pfnAllocation)(
    1848  pAllocationCallbacks->pUserData,
    1849  size,
    1850  alignment,
    1851  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    1852  }
    1853  else
    1854  {
    1855  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    1856  }
    1857 }
    1858 
    1859 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
    1860 {
    1861  if((pAllocationCallbacks != VMA_NULL) &&
    1862  (pAllocationCallbacks->pfnFree != VMA_NULL))
    1863  {
    1864  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    1865  }
    1866  else
    1867  {
    1868  VMA_SYSTEM_FREE(ptr);
    1869  }
    1870 }
    1871 
// Allocates raw, properly aligned storage for one object of type T.
// No constructor is called; pair with vma_new / vma_delete.
template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}
    1877 
// Allocates raw, properly aligned storage for (count) objects of type T.
// No constructors are called; pair with vma_new_array / vma_delete_array.
template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}
    1883 
    1884 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
    1885 
    1886 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
    1887 
    1888 template<typename T>
    1889 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
    1890 {
    1891  ptr->~T();
    1892  VmaFree(pAllocationCallbacks, ptr);
    1893 }
    1894 
    1895 template<typename T>
    1896 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
    1897 {
    1898  if(ptr != VMA_NULL)
    1899  {
    1900  for(size_t i = count; i--; )
    1901  {
    1902  ptr[i].~T();
    1903  }
    1904  VmaFree(pAllocationCallbacks, ptr);
    1905  }
    1906 }
    1907 
// STL-compatible allocator. Routes all allocations through the library's
// VkAllocationCallbacks-aware VmaMalloc/VmaFree pair.
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    // Rebinding copy constructor required by the STL allocator model.
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    // Two allocators are interchangeable iff they use the same callbacks.
    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    // m_pCallbacks is const, so assignment cannot be supported.
    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};
    1935 
    1936 #if VMA_USE_STL_VECTOR
    1937 
    1938 #define VmaVector std::vector
    1939 
// Inserts (item) into std::vector at position (index).
template<typename T, typename allocatorT>
static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(vec.begin() + index, item);
}
    1945 
// Removes the element at position (index) from std::vector.
template<typename T, typename allocatorT>
static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
{
    vec.erase(vec.begin() + index);
}
    1951 
    1952 #else // #if VMA_USE_STL_VECTOR
    1953 
    1954 /* Class with interface compatible with subset of std::vector.
    1955 T must be POD because constructors and destructors are not called and memcpy is
    1956 used for these objects. */
    1957 template<typename T, typename AllocatorT>
    1958 class VmaVector
    1959 {
    1960 public:
    1961  typedef T value_type;
    1962 
    1963  VmaVector(const AllocatorT& allocator) :
    1964  m_Allocator(allocator),
    1965  m_pArray(VMA_NULL),
    1966  m_Count(0),
    1967  m_Capacity(0)
    1968  {
    1969  }
    1970 
    1971  VmaVector(size_t count, const AllocatorT& allocator) :
    1972  m_Allocator(allocator),
    1973  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
    1974  m_Count(count),
    1975  m_Capacity(count)
    1976  {
    1977  }
    1978 
    1979  VmaVector(const VmaVector<T, AllocatorT>& src) :
    1980  m_Allocator(src.m_Allocator),
    1981  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
    1982  m_Count(src.m_Count),
    1983  m_Capacity(src.m_Count)
    1984  {
    1985  if(m_Count != 0)
    1986  {
    1987  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
    1988  }
    1989  }
    1990 
    1991  ~VmaVector()
    1992  {
    1993  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    1994  }
    1995 
    1996  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    1997  {
    1998  if(&rhs != this)
    1999  {
    2000  resize(rhs.m_Count);
    2001  if(m_Count != 0)
    2002  {
    2003  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
    2004  }
    2005  }
    2006  return *this;
    2007  }
    2008 
    2009  bool empty() const { return m_Count == 0; }
    2010  size_t size() const { return m_Count; }
    2011  T* data() { return m_pArray; }
    2012  const T* data() const { return m_pArray; }
    2013 
    2014  T& operator[](size_t index)
    2015  {
    2016  VMA_HEAVY_ASSERT(index < m_Count);
    2017  return m_pArray[index];
    2018  }
    2019  const T& operator[](size_t index) const
    2020  {
    2021  VMA_HEAVY_ASSERT(index < m_Count);
    2022  return m_pArray[index];
    2023  }
    2024 
    2025  T& front()
    2026  {
    2027  VMA_HEAVY_ASSERT(m_Count > 0);
    2028  return m_pArray[0];
    2029  }
    2030  const T& front() const
    2031  {
    2032  VMA_HEAVY_ASSERT(m_Count > 0);
    2033  return m_pArray[0];
    2034  }
    2035  T& back()
    2036  {
    2037  VMA_HEAVY_ASSERT(m_Count > 0);
    2038  return m_pArray[m_Count - 1];
    2039  }
    2040  const T& back() const
    2041  {
    2042  VMA_HEAVY_ASSERT(m_Count > 0);
    2043  return m_pArray[m_Count - 1];
    2044  }
    2045 
    2046  void reserve(size_t newCapacity, bool freeMemory = false)
    2047  {
    2048  newCapacity = VMA_MAX(newCapacity, m_Count);
    2049 
    2050  if((newCapacity < m_Capacity) && !freeMemory)
    2051  {
    2052  newCapacity = m_Capacity;
    2053  }
    2054 
    2055  if(newCapacity != m_Capacity)
    2056  {
    2057  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
    2058  if(m_Count != 0)
    2059  {
    2060  memcpy(newArray, m_pArray, m_Count * sizeof(T));
    2061  }
    2062  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    2063  m_Capacity = newCapacity;
    2064  m_pArray = newArray;
    2065  }
    2066  }
    2067 
    2068  void resize(size_t newCount, bool freeMemory = false)
    2069  {
    2070  size_t newCapacity = m_Capacity;
    2071  if(newCount > m_Capacity)
    2072  {
    2073  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
    2074  }
    2075  else if(freeMemory)
    2076  {
    2077  newCapacity = newCount;
    2078  }
    2079 
    2080  if(newCapacity != m_Capacity)
    2081  {
    2082  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
    2083  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
    2084  if(elementsToCopy != 0)
    2085  {
    2086  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
    2087  }
    2088  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    2089  m_Capacity = newCapacity;
    2090  m_pArray = newArray;
    2091  }
    2092 
    2093  m_Count = newCount;
    2094  }
    2095 
    2096  void clear(bool freeMemory = false)
    2097  {
    2098  resize(0, freeMemory);
    2099  }
    2100 
    2101  void insert(size_t index, const T& src)
    2102  {
    2103  VMA_HEAVY_ASSERT(index <= m_Count);
    2104  const size_t oldCount = size();
    2105  resize(oldCount + 1);
    2106  if(index < oldCount)
    2107  {
    2108  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
    2109  }
    2110  m_pArray[index] = src;
    2111  }
    2112 
    2113  void remove(size_t index)
    2114  {
    2115  VMA_HEAVY_ASSERT(index < m_Count);
    2116  const size_t oldCount = size();
    2117  if(index < oldCount - 1)
    2118  {
    2119  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
    2120  }
    2121  resize(oldCount - 1);
    2122  }
    2123 
    2124  void push_back(const T& src)
    2125  {
    2126  const size_t newIndex = size();
    2127  resize(newIndex + 1);
    2128  m_pArray[newIndex] = src;
    2129  }
    2130 
    2131  void pop_back()
    2132  {
    2133  VMA_HEAVY_ASSERT(m_Count > 0);
    2134  resize(size() - 1);
    2135  }
    2136 
    2137  void push_front(const T& src)
    2138  {
    2139  insert(0, src);
    2140  }
    2141 
    2142  void pop_front()
    2143  {
    2144  VMA_HEAVY_ASSERT(m_Count > 0);
    2145  remove(0);
    2146  }
    2147 
    2148  typedef T* iterator;
    2149 
    2150  iterator begin() { return m_pArray; }
    2151  iterator end() { return m_pArray + m_Count; }
    2152 
    2153 private:
    2154  AllocatorT m_Allocator;
    2155  T* m_pArray;
    2156  size_t m_Count;
    2157  size_t m_Capacity;
    2158 };
    2159 
// Inserts (item) into VmaVector at position (index).
template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}
    2165 
// Removes the element at position (index) from VmaVector.
template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}
    2171 
    2172 #endif // #if VMA_USE_STL_VECTOR
    2173 
    2174 template<typename CmpLess, typename VectorT>
    2175 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
    2176 {
    2177  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    2178  vector.data(),
    2179  vector.data() + vector.size(),
    2180  value,
    2181  CmpLess()) - vector.data();
    2182  VmaVectorInsert(vector, indexToInsert, value);
    2183  return indexToInsert;
    2184 }
    2185 
    2186 template<typename CmpLess, typename VectorT>
    2187 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
    2188 {
    2189  CmpLess comparator;
    2190  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
    2191  vector.begin(),
    2192  vector.end(),
    2193  value,
    2194  comparator);
    2195  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    2196  {
    2197  size_t indexToRemove = it - vector.begin();
    2198  VmaVectorRemove(vector, indexToRemove);
    2199  return true;
    2200  }
    2201  return false;
    2202 }
    2203 
    2204 template<typename CmpLess, typename VectorT>
    2205 size_t VmaVectorFindSorted(const VectorT& vector, const typename VectorT::value_type& value)
    2206 {
    2207  CmpLess comparator;
    2208  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
    2209  vector.data(),
    2210  vector.data() + vector.size(),
    2211  value,
    2212  comparator);
    2213  if(it != vector.size() && !comparator(*it, value) && !comparator(value, *it))
    2214  {
    2215  return it - vector.begin();
    2216  }
    2217  else
    2218  {
    2219  return vector.size();
    2220  }
    2221 }
    2222 
// class VmaPoolAllocator

/*
Allocator for objects of type T using a list of arrays (pools) to speed up
allocation. Number of elements that can be allocated is not bounded because
allocator can create multiple blocks.
*/
template<typename T>
class VmaPoolAllocator
{
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    ~VmaPoolAllocator();
    // Frees all blocks; any pointers previously returned by Alloc() become invalid.
    void Clear();
    // Returns a pointer to uninitialized storage for one T (no constructor is called).
    T* Alloc();
    // Returns a slot previously obtained from Alloc() to its block's free list.
    void Free(T* ptr);

private:
    // A slot is either a live value or a link in the block's intrusive free list.
    union Item
    {
        uint32_t NextFreeIndex;
        T Value;
    };

    struct ItemBlock
    {
        Item* pItems;
        uint32_t FirstFreeIndex; // UINT32_MAX when the block has no free slot.
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    size_t m_ItemsPerBlock;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};
    2260 
// Constructs an empty pool allocator; no memory is allocated until first Alloc().
template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemsPerBlock(itemsPerBlock),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(itemsPerBlock > 0);
}
    2269 
// Destroys the allocator, releasing all blocks (destructors of T are NOT run).
template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    Clear();
}
    2275 
    2276 template<typename T>
    2277 void VmaPoolAllocator<T>::Clear()
    2278 {
    2279  for(size_t i = m_ItemBlocks.size(); i--; )
    2280  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    2281  m_ItemBlocks.clear();
    2282 }
    2283 
    2284 template<typename T>
    2285 T* VmaPoolAllocator<T>::Alloc()
    2286 {
    2287  for(size_t i = m_ItemBlocks.size(); i--; )
    2288  {
    2289  ItemBlock& block = m_ItemBlocks[i];
    2290  // This block has some free items: Use first one.
    2291  if(block.FirstFreeIndex != UINT32_MAX)
    2292  {
    2293  Item* const pItem = &block.pItems[block.FirstFreeIndex];
    2294  block.FirstFreeIndex = pItem->NextFreeIndex;
    2295  return &pItem->Value;
    2296  }
    2297  }
    2298 
    2299  // No block has free item: Create new one and use it.
    2300  ItemBlock& newBlock = CreateNewBlock();
    2301  Item* const pItem = &newBlock.pItems[0];
    2302  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    2303  return &pItem->Value;
    2304 }
    2305 
    2306 template<typename T>
    2307 void VmaPoolAllocator<T>::Free(T* ptr)
    2308 {
    2309  // Search all memory blocks to find ptr.
    2310  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    2311  {
    2312  ItemBlock& block = m_ItemBlocks[i];
    2313 
    2314  // Casting to union.
    2315  Item* pItemPtr;
    2316  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
    2317 
    2318  // Check if pItemPtr is in address range of this block.
    2319  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
    2320  {
    2321  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
    2322  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
    2323  block.FirstFreeIndex = index;
    2324  return;
    2325  }
    2326  }
    2327  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
    2328 }
    2329 
    2330 template<typename T>
    2331 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
    2332 {
    2333  ItemBlock newBlock = {
    2334  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
    2335 
    2336  m_ItemBlocks.push_back(newBlock);
    2337 
    2338  // Setup singly-linked list of all free items in this block.
    2339  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
    2340  newBlock.pItems[i].NextFreeIndex = i + 1;
    2341  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
    2342  return m_ItemBlocks.back();
    2343 }
    2344 
    2346 // class VmaRawList, VmaList
    2347 
    2348 #if VMA_USE_STL_LIST
    2349 
    2350 #define VmaList std::list
    2351 
    2352 #else // #if VMA_USE_STL_LIST
    2353 
// Node of VmaRawList: doubly-linked pointers plus the stored value.
template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev; // Null for the front item.
    VmaListItem* pNext; // Null for the back item.
    T Value;
};
    2361 
    2362 // Doubly linked list.
template<typename T>
class VmaRawList
{
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    // Parameterless overloads link a new node and return it without touching
    // its Value; the (const T&) overloads additionally copy-assign the value.
    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator; // Source of all list nodes.
    ItemType* m_pFront; // Null when list is empty.
    ItemType* m_pBack;  // Null when list is empty.
    size_t m_Count;

    // Declared not defined, to block copy constructor and assignment operator.
    VmaRawList(const VmaRawList<T>& src);
    VmaRawList<T>& operator=(const VmaRawList<T>& rhs);
};
    2409 
// Constructs an empty list. Nodes come from an embedded pool allocator
// (128 items per pool block) using the given Vulkan allocation callbacks.
template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}
    2419 
// Destructor relies on m_ItemAllocator's destructor to release all node memory.
template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would be unnecessary
    // computations to return all items to m_ItemAllocator as free.
}
    2426 
    2427 template<typename T>
    2428 void VmaRawList<T>::Clear()
    2429 {
    2430  if(IsEmpty() == false)
    2431  {
    2432  ItemType* pItem = m_pBack;
    2433  while(pItem != VMA_NULL)
    2434  {
    2435  ItemType* const pPrevItem = pItem->pPrev;
    2436  m_ItemAllocator.Free(pItem);
    2437  pItem = pPrevItem;
    2438  }
    2439  m_pFront = VMA_NULL;
    2440  m_pBack = VMA_NULL;
    2441  m_Count = 0;
    2442  }
    2443 }
    2444 
    2445 template<typename T>
    2446 VmaListItem<T>* VmaRawList<T>::PushBack()
    2447 {
    2448  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    2449  pNewItem->pNext = VMA_NULL;
    2450  if(IsEmpty())
    2451  {
    2452  pNewItem->pPrev = VMA_NULL;
    2453  m_pFront = pNewItem;
    2454  m_pBack = pNewItem;
    2455  m_Count = 1;
    2456  }
    2457  else
    2458  {
    2459  pNewItem->pPrev = m_pBack;
    2460  m_pBack->pNext = pNewItem;
    2461  m_pBack = pNewItem;
    2462  ++m_Count;
    2463  }
    2464  return pNewItem;
    2465 }
    2466 
    2467 template<typename T>
    2468 VmaListItem<T>* VmaRawList<T>::PushFront()
    2469 {
    2470  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    2471  pNewItem->pPrev = VMA_NULL;
    2472  if(IsEmpty())
    2473  {
    2474  pNewItem->pNext = VMA_NULL;
    2475  m_pFront = pNewItem;
    2476  m_pBack = pNewItem;
    2477  m_Count = 1;
    2478  }
    2479  else
    2480  {
    2481  pNewItem->pNext = m_pFront;
    2482  m_pFront->pPrev = pNewItem;
    2483  m_pFront = pNewItem;
    2484  ++m_Count;
    2485  }
    2486  return pNewItem;
    2487 }
    2488 
    2489 template<typename T>
    2490 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
    2491 {
    2492  ItemType* const pNewItem = PushBack();
    2493  pNewItem->Value = value;
    2494  return pNewItem;
    2495 }
    2496 
    2497 template<typename T>
    2498 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
    2499 {
    2500  ItemType* const pNewItem = PushFront();
    2501  pNewItem->Value = value;
    2502  return pNewItem;
    2503 }
    2504 
    2505 template<typename T>
    2506 void VmaRawList<T>::PopBack()
    2507 {
    2508  VMA_HEAVY_ASSERT(m_Count > 0);
    2509  ItemType* const pBackItem = m_pBack;
    2510  ItemType* const pPrevItem = pBackItem->pPrev;
    2511  if(pPrevItem != VMA_NULL)
    2512  {
    2513  pPrevItem->pNext = VMA_NULL;
    2514  }
    2515  m_pBack = pPrevItem;
    2516  m_ItemAllocator.Free(pBackItem);
    2517  --m_Count;
    2518 }
    2519 
    2520 template<typename T>
    2521 void VmaRawList<T>::PopFront()
    2522 {
    2523  VMA_HEAVY_ASSERT(m_Count > 0);
    2524  ItemType* const pFrontItem = m_pFront;
    2525  ItemType* const pNextItem = pFrontItem->pNext;
    2526  if(pNextItem != VMA_NULL)
    2527  {
    2528  pNextItem->pPrev = VMA_NULL;
    2529  }
    2530  m_pFront = pNextItem;
    2531  m_ItemAllocator.Free(pFrontItem);
    2532  --m_Count;
    2533 }
    2534 
    2535 template<typename T>
    2536 void VmaRawList<T>::Remove(ItemType* pItem)
    2537 {
    2538  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    2539  VMA_HEAVY_ASSERT(m_Count > 0);
    2540 
    2541  if(pItem->pPrev != VMA_NULL)
    2542  {
    2543  pItem->pPrev->pNext = pItem->pNext;
    2544  }
    2545  else
    2546  {
    2547  VMA_HEAVY_ASSERT(m_pFront == pItem);
    2548  m_pFront = pItem->pNext;
    2549  }
    2550 
    2551  if(pItem->pNext != VMA_NULL)
    2552  {
    2553  pItem->pNext->pPrev = pItem->pPrev;
    2554  }
    2555  else
    2556  {
    2557  VMA_HEAVY_ASSERT(m_pBack == pItem);
    2558  m_pBack = pItem->pPrev;
    2559  }
    2560 
    2561  m_ItemAllocator.Free(pItem);
    2562  --m_Count;
    2563 }
    2564 
    2565 template<typename T>
    2566 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
    2567 {
    2568  if(pItem != VMA_NULL)
    2569  {
    2570  ItemType* const prevItem = pItem->pPrev;
    2571  ItemType* const newItem = m_ItemAllocator.Alloc();
    2572  newItem->pPrev = prevItem;
    2573  newItem->pNext = pItem;
    2574  pItem->pPrev = newItem;
    2575  if(prevItem != VMA_NULL)
    2576  {
    2577  prevItem->pNext = newItem;
    2578  }
    2579  else
    2580  {
    2581  VMA_HEAVY_ASSERT(m_pFront == pItem);
    2582  m_pFront = newItem;
    2583  }
    2584  ++m_Count;
    2585  return newItem;
    2586  }
    2587  else
    2588  return PushBack();
    2589 }
    2590 
    2591 template<typename T>
    2592 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
    2593 {
    2594  if(pItem != VMA_NULL)
    2595  {
    2596  ItemType* const nextItem = pItem->pNext;
    2597  ItemType* const newItem = m_ItemAllocator.Alloc();
    2598  newItem->pNext = nextItem;
    2599  newItem->pPrev = pItem;
    2600  pItem->pNext = newItem;
    2601  if(nextItem != VMA_NULL)
    2602  {
    2603  nextItem->pPrev = newItem;
    2604  }
    2605  else
    2606  {
    2607  VMA_HEAVY_ASSERT(m_pBack == pItem);
    2608  m_pBack = newItem;
    2609  }
    2610  ++m_Count;
    2611  return newItem;
    2612  }
    2613  else
    2614  return PushFront();
    2615 }
    2616 
    2617 template<typename T>
    2618 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
    2619 {
    2620  ItemType* const newItem = InsertBefore(pItem);
    2621  newItem->Value = value;
    2622  return newItem;
    2623 }
    2624 
    2625 template<typename T>
    2626 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
    2627 {
    2628  ItemType* const newItem = InsertAfter(pItem);
    2629  newItem->Value = value;
    2630  return newItem;
    2631 }
    2632 
    2633 template<typename T, typename AllocatorT>
    2634 class VmaList
    2635 {
    2636 public:
    2637  class iterator
    2638  {
    2639  public:
    2640  iterator() :
    2641  m_pList(VMA_NULL),
    2642  m_pItem(VMA_NULL)
    2643  {
    2644  }
    2645 
    2646  T& operator*() const
    2647  {
    2648  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    2649  return m_pItem->Value;
    2650  }
    2651  T* operator->() const
    2652  {
    2653  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    2654  return &m_pItem->Value;
    2655  }
    2656 
    2657  iterator& operator++()
    2658  {
    2659  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    2660  m_pItem = m_pItem->pNext;
    2661  return *this;
    2662  }
    2663  iterator& operator--()
    2664  {
    2665  if(m_pItem != VMA_NULL)
    2666  {
    2667  m_pItem = m_pItem->pPrev;
    2668  }
    2669  else
    2670  {
    2671  VMA_HEAVY_ASSERT(!m_pList.IsEmpty());
    2672  m_pItem = m_pList->Back();
    2673  }
    2674  return *this;
    2675  }
    2676 
    2677  iterator operator++(int)
    2678  {
    2679  iterator result = *this;
    2680  ++*this;
    2681  return result;
    2682  }
    2683  iterator operator--(int)
    2684  {
    2685  iterator result = *this;
    2686  --*this;
    2687  return result;
    2688  }
    2689 
    2690  bool operator==(const iterator& rhs) const
    2691  {
    2692  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    2693  return m_pItem == rhs.m_pItem;
    2694  }
    2695  bool operator!=(const iterator& rhs) const
    2696  {
    2697  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    2698  return m_pItem != rhs.m_pItem;
    2699  }
    2700 
    2701  private:
    2702  VmaRawList<T>* m_pList;
    2703  VmaListItem<T>* m_pItem;
    2704 
    2705  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
    2706  m_pList(pList),
    2707  m_pItem(pItem)
    2708  {
    2709  }
    2710 
    2711  friend class VmaList<T, AllocatorT>;
    2712  };
    2713 
    2714  class const_iterator
    2715  {
    2716  public:
    2717  const_iterator() :
    2718  m_pList(VMA_NULL),
    2719  m_pItem(VMA_NULL)
    2720  {
    2721  }
    2722 
    2723  const_iterator(const iterator& src) :
    2724  m_pList(src.m_pList),
    2725  m_pItem(src.m_pItem)
    2726  {
    2727  }
    2728 
    2729  const T& operator*() const
    2730  {
    2731  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    2732  return m_pItem->Value;
    2733  }
    2734  const T* operator->() const
    2735  {
    2736  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    2737  return &m_pItem->Value;
    2738  }
    2739 
    2740  const_iterator& operator++()
    2741  {
    2742  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    2743  m_pItem = m_pItem->pNext;
    2744  return *this;
    2745  }
    2746  const_iterator& operator--()
    2747  {
    2748  if(m_pItem != VMA_NULL)
    2749  {
    2750  m_pItem = m_pItem->pPrev;
    2751  }
    2752  else
    2753  {
    2754  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
    2755  m_pItem = m_pList->Back();
    2756  }
    2757  return *this;
    2758  }
    2759 
    2760  const_iterator operator++(int)
    2761  {
    2762  const_iterator result = *this;
    2763  ++*this;
    2764  return result;
    2765  }
    2766  const_iterator operator--(int)
    2767  {
    2768  const_iterator result = *this;
    2769  --*this;
    2770  return result;
    2771  }
    2772 
    2773  bool operator==(const const_iterator& rhs) const
    2774  {
    2775  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    2776  return m_pItem == rhs.m_pItem;
    2777  }
    2778  bool operator!=(const const_iterator& rhs) const
    2779  {
    2780  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    2781  return m_pItem != rhs.m_pItem;
    2782  }
    2783 
    2784  private:
    2785  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
    2786  m_pList(pList),
    2787  m_pItem(pItem)
    2788  {
    2789  }
    2790 
    2791  const VmaRawList<T>* m_pList;
    2792  const VmaListItem<T>* m_pItem;
    2793 
    2794  friend class VmaList<T, AllocatorT>;
    2795  };
    2796 
    2797  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
    2798 
    2799  bool empty() const { return m_RawList.IsEmpty(); }
    2800  size_t size() const { return m_RawList.GetCount(); }
    2801 
    2802  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    2803  iterator end() { return iterator(&m_RawList, VMA_NULL); }
    2804 
    2805  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    2806  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
    2807 
    2808  void clear() { m_RawList.Clear(); }
    2809  void push_back(const T& value) { m_RawList.PushBack(value); }
    2810  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    2811  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
    2812 
    2813 private:
    2814  VmaRawList<T> m_RawList;
    2815 };
    2816 
    2817 #endif // #if VMA_USE_STL_LIST
    2818 
    2820 // class VmaMap
    2821 
    2822 // Unused in this version.
    2823 #if 0
    2824 
    2825 #if VMA_USE_STL_UNORDERED_MAP
    2826 
    2827 #define VmaPair std::pair
    2828 
    2829 #define VMA_MAP_TYPE(KeyT, ValueT) \
    2830  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
    2831 
    2832 #else // #if VMA_USE_STL_UNORDERED_MAP
    2833 
// Minimal std::pair replacement used by VmaMap (code currently disabled by #if 0).
template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};
    2843 
    2844 /* Class compatible with subset of interface of std::unordered_map.
    2845 KeyT, ValueT must be POD because they will be stored in VmaVector.
    2846 */
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    typedef PairType* iterator; // Iterators are raw pointers into m_Vector.

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    void insert(const PairType& pair);
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    // Pairs kept sorted by key so find() can binary-search.
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};
    2866 
    2867 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
    2868 
// Orders VmaPairs by their first member; the second overload allows
// comparing a pair directly against a bare key during binary search.
template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};
    2881 
// Inserts pair at the position that keeps m_Vector sorted by key.
// NOTE(review): duplicate keys are not rejected here - presumably callers
// never insert an existing key; verify before re-enabling this code.
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        pair,
        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    VmaVectorInsert(m_Vector, indexToInsert, pair);
}
    2892 
// Binary-searches the sorted vector; returns iterator to the pair with the
// given key, or end() if absent.
template<typename KeyT, typename ValueT>
VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
{
    PairType* it = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        key,
        VmaPairFirstLess<KeyT, ValueT>());
    if((it != m_Vector.end()) && (it->first == key))
    {
        return it;
    }
    else
    {
        return m_Vector.end();
    }
}
    2910 
// Removes the element at it, shifting the remaining pairs (vector-backed).
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}
    2916 
    2917 #endif // #if VMA_USE_STL_UNORDERED_MAP
    2918 
    2919 #endif // #if 0
    2920 
    2922 
    2923 class VmaDeviceMemoryBlock;
    2924 
// Separates block vectors that hold persistently mapped memory from those
// holding unmapped memory.
enum VMA_BLOCK_VECTOR_TYPE
{
    VMA_BLOCK_VECTOR_TYPE_UNMAPPED,
    VMA_BLOCK_VECTOR_TYPE_MAPPED,
    VMA_BLOCK_VECTOR_TYPE_COUNT
};
    2931 
    2932 static VMA_BLOCK_VECTOR_TYPE VmaAllocationCreateFlagsToBlockVectorType(VmaAllocationCreateFlags flags)
    2933 {
    2934  return (flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0 ?
    2935  VMA_BLOCK_VECTOR_TYPE_MAPPED :
    2936  VMA_BLOCK_VECTOR_TYPE_UNMAPPED;
    2937 }
    2938 
struct VmaAllocation_T
{
public:
    // Discriminator for the union below.
    enum ALLOCATION_TYPE
    {
        ALLOCATION_TYPE_NONE,      // Freshly constructed, not yet initialized.
        ALLOCATION_TYPE_BLOCK,     // Suballocation inside a VmaDeviceMemoryBlock.
        ALLOCATION_TYPE_DEDICATED, // Owns its own VkDeviceMemory.
    };

    VmaAllocation_T(uint32_t currentFrameIndex) :
        m_Alignment(1),
        m_Size(0),
        m_pUserData(VMA_NULL),
        m_Type(ALLOCATION_TYPE_NONE),
        m_SuballocationType(VMA_SUBALLOCATION_TYPE_UNKNOWN),
        m_LastUseFrameIndex(currentFrameIndex)
    {
    }

    // Initializes this object as a suballocation of an existing memory block.
    // Must be called exactly once, on a NONE-typed object.
    void InitBlockAllocation(
        VmaPool hPool,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset,
        VkDeviceSize alignment,
        VkDeviceSize size,
        VmaSuballocationType suballocationType,
        void* pUserData,
        bool canBecomeLost)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(block != VMA_NULL);
        m_Type = ALLOCATION_TYPE_BLOCK;
        m_Alignment = alignment;
        m_Size = size;
        m_pUserData = pUserData;
        m_SuballocationType = suballocationType;
        m_BlockAllocation.m_hPool = hPool;
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
        m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    }

    // Initializes this object as an already-lost block allocation (null block).
    // Requires the frame index to have been set to VMA_FRAME_INDEX_LOST.
    void InitLost()
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
        m_Type = ALLOCATION_TYPE_BLOCK;
        m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
        m_BlockAllocation.m_Block = VMA_NULL;
        m_BlockAllocation.m_Offset = 0;
        m_BlockAllocation.m_CanBecomeLost = true;
    }

    // Moves an existing block allocation to another block/offset
    // (used during defragmentation-style relocation).
    void ChangeBlockAllocation(
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset)
    {
        VMA_ASSERT(block != VMA_NULL);
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
    }

    // Initializes this object as a dedicated allocation owning hMemory.
    // Must be called exactly once, on a NONE-typed object.
    void InitDedicatedAllocation(
        uint32_t memoryTypeIndex,
        VkDeviceMemory hMemory,
        VmaSuballocationType suballocationType,
        bool persistentMap,
        void* pMappedData,
        VkDeviceSize size,
        void* pUserData)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(hMemory != VK_NULL_HANDLE);
        m_Type = ALLOCATION_TYPE_DEDICATED;
        m_Alignment = 0;
        m_Size = size;
        m_pUserData = pUserData;
        m_SuballocationType = suballocationType;
        m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
        m_DedicatedAllocation.m_hMemory = hMemory;
        m_DedicatedAllocation.m_PersistentMap = persistentMap;
        m_DedicatedAllocation.m_pMappedData = pMappedData;
    }

    ALLOCATION_TYPE GetType() const { return m_Type; }
    VkDeviceSize GetAlignment() const { return m_Alignment; }
    VkDeviceSize GetSize() const { return m_Size; }
    void* GetUserData() const { return m_pUserData; }
    void SetUserData(void* pUserData) { m_pUserData = pUserData; }
    VmaSuballocationType GetSuballocationType() const { return m_SuballocationType; }

    // Valid only for ALLOCATION_TYPE_BLOCK.
    VmaDeviceMemoryBlock* GetBlock() const
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        return m_BlockAllocation.m_Block;
    }
    VkDeviceSize GetOffset() const;
    VkDeviceMemory GetMemory() const;
    uint32_t GetMemoryTypeIndex() const;
    VMA_BLOCK_VECTOR_TYPE GetBlockVectorType() const;
    void* GetMappedData() const;
    bool CanBecomeLost() const;
    VmaPool GetPool() const;

    VkResult DedicatedAllocMapPersistentlyMappedMemory(VmaAllocator hAllocator);
    void DedicatedAllocUnmapPersistentlyMappedMemory(VmaAllocator hAllocator);

    uint32_t GetLastUseFrameIndex() const
    {
        return m_LastUseFrameIndex.load();
    }
    // Atomic CAS on the last-use frame index; used by the lost-allocation logic.
    bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    {
        return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    }
    /*
    - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
      makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    - Else, returns false.

    If hAllocation is already lost, assert - you should not call it then.
    If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    */
    bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Fills outInfo as if this dedicated allocation were a one-allocation "block".
    void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
        outInfo.blockCount = 1;
        outInfo.allocationCount = 1;
        outInfo.unusedRangeCount = 0;
        outInfo.usedBytes = m_Size;
        outInfo.unusedBytes = 0;
        outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
        outInfo.unusedRangeSizeMin = UINT64_MAX;
        outInfo.unusedRangeSizeMax = 0;
    }

private:
    VkDeviceSize m_Alignment;
    VkDeviceSize m_Size;
    void* m_pUserData;
    ALLOCATION_TYPE m_Type; // Selects which union member below is active.
    VmaSuballocationType m_SuballocationType;
    VMA_ATOMIC_UINT32 m_LastUseFrameIndex;

    // Allocation out of VmaDeviceMemoryBlock.
    struct BlockAllocation
    {
        VmaPool m_hPool; // Null if belongs to general memory.
        VmaDeviceMemoryBlock* m_Block;
        VkDeviceSize m_Offset;
        bool m_CanBecomeLost;
    };

    // Allocation for an object that has its own private VkDeviceMemory.
    struct DedicatedAllocation
    {
        uint32_t m_MemoryTypeIndex;
        VkDeviceMemory m_hMemory;
        bool m_PersistentMap;
        void* m_pMappedData;
    };

    union
    {
        // Allocation out of VmaDeviceMemoryBlock.
        BlockAllocation m_BlockAllocation;
        // Allocation for an object that has its own private VkDeviceMemory.
        DedicatedAllocation m_DedicatedAllocation;
    };
};
    3113 
    3114 /*
    3115 Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
    3116 allocated memory block or free.
    3117 */
struct VmaSuballocation
{
    VkDeviceSize offset; // Offset from the start of the owning block.
    VkDeviceSize size;   // Size of this region in bytes.
    VmaAllocation hAllocation; // NOTE(review): presumably null for FREE regions - confirm at use sites.
    VmaSuballocationType type; // FREE or the kind of resource placed here.
};
    3125 
    3126 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
    3127 
    3128 // Cost of one additional allocation lost, as equivalent in bytes.
    3129 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
    3130 
    3131 /*
    3132 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
    3133 
    3134 If canMakeOtherLost was false:
    3135 - item points to a FREE suballocation.
    3136 - itemsToMakeLostCount is 0.
    3137 
    3138 If canMakeOtherLost was true:
    3139 - item points to first of sequence of suballocations, which are either FREE,
    3140  or point to VmaAllocations that can become lost.
    3141 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
    3142  the requested allocation to succeed.
    3143 */
struct VmaAllocationRequest
{
    VkDeviceSize offset;
    VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
    VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
    VmaSuballocationList::iterator item;
    size_t itemsToMakeLostCount;

    // Cost of fulfilling this request: bytes of existing allocations that would
    // be lost, plus a fixed per-allocation penalty (VMA_LOST_ALLOCATION_COST).
    // Lower is better when comparing candidate requests.
    VkDeviceSize CalcCost() const
    {
        return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    }
};
    3157 
    3158 /*
    3159 Data structure used for bookkeeping of allocations and unused ranges of memory
    3160 in a single VkDeviceMemory block.
    3161 */
class VmaBlockMetadata
{
public:
    VmaBlockMetadata(VmaAllocator hAllocator);
    ~VmaBlockMetadata();
    // Initializes metadata for a block of given total size (one big FREE suballocation).
    void Init(VkDeviceSize size);

    // Validates all data structures inside this object. If not valid, returns false.
    bool Validate() const;
    VkDeviceSize GetSize() const { return m_Size; }
    size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
    VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    VkDeviceSize GetUnusedRangeSizeMax() const;
    // Returns true if this block is empty - contains only single free suballocation.
    bool IsEmpty() const;

    void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    // Creates trivial request for case when block is empty.
    void CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest);

    // Tries to find a place for suballocation with given parameters inside this block.
    // If succeeded, fills pAllocationRequest and returns true.
    // If failed, returns false.
    bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        VmaAllocationRequest* pAllocationRequest);

    // Marks the allocations listed in pAllocationRequest as lost so the request
    // can proceed; returns false if any of them could not be made lost.
    bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    // Makes every eligible allocation in this block lost; returns how many were lost.
    uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Makes actual allocation based on request. Request must already be checked and valid.
    void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        VmaAllocation hAllocation);

    // Frees suballocation assigned to given memory region.
    void Free(const VmaAllocation allocation);

private:
    VkDeviceSize m_Size;      // Total size of the block.
    uint32_t m_FreeCount;     // Number of FREE suballocations in m_Suballocations.
    VkDeviceSize m_SumFreeSize; // Sum of sizes of all FREE suballocations.
    VmaSuballocationList m_Suballocations;
    // Suballocations that are free and have size greater than certain threshold.
    // Sorted by size, ascending.
    VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;

    bool ValidateFreeSuballocationList() const;

    // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
    // If yes, fills pOffset and returns true. If no, returns false.
    bool CheckAllocation(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        VmaSuballocationList::const_iterator suballocItem,
        bool canMakeOtherLost,
        VkDeviceSize* pOffset,
        size_t* itemsToMakeLostCount,
        VkDeviceSize* pSumFreeSize,
        VkDeviceSize* pSumItemSize) const;
    // Given free suballocation, it merges it with following one, which must also be free.
    void MergeFreeWithNext(VmaSuballocationList::iterator item);
    // Releases given suballocation, making it free.
    // Merges it with adjacent free suballocations if applicable.
    // Returns iterator to new free suballocation at this place.
    VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    // Given free suballocation, it inserts it into sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    // Given free suballocation, it removes it from sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
};
    3257 
    3258 /*
    3259 Represents a single block of device memory (`VkDeviceMemory`) with all the
    3260 data about its regions (aka suballocations, `VmaAllocation`), assigned and free.
    3261 
    3262 Thread-safety: This class must be externally synchronized.
    3263 */
class VmaDeviceMemoryBlock
{
public:
    uint32_t m_MemoryTypeIndex;
    VMA_BLOCK_VECTOR_TYPE m_BlockVectorType;
    // Underlying Vulkan memory handle; VK_NULL_HANDLE after Destroy().
    VkDeviceMemory m_hMemory;
    // Whether this block is persistently mapped.
    bool m_PersistentMap;
    // Pointer to mapped memory when mapped, null otherwise.
    void* m_pMappedData;
    // Bookkeeping of used/free regions (suballocations) inside this block.
    VmaBlockMetadata m_Metadata;

    VmaDeviceMemoryBlock(VmaAllocator hAllocator);

    ~VmaDeviceMemoryBlock()
    {
        // Destroy() must have been called first so that m_hMemory is released.
        VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    }

    // Always call after construction.
    void Init(
        uint32_t newMemoryTypeIndex,
        VMA_BLOCK_VECTOR_TYPE newBlockVectorType,
        VkDeviceMemory newMemory,
        VkDeviceSize newSize,
        bool persistentMap,
        void* pMappedData);
    // Always call before destruction.
    void Destroy(VmaAllocator allocator);

    // Validates all data structures inside this object. If not valid, returns false.
    bool Validate() const;
};
    3295 
    3296 struct VmaPointerLess
    3297 {
    3298  bool operator()(const void* lhs, const void* rhs) const
    3299  {
    3300  return lhs < rhs;
    3301  }
    3302 };
    3303 
    3304 class VmaDefragmentator;
    3305 
    3306 /*
    3307 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
    3308 Vulkan memory type.
    3309 
    3310 Synchronized internally with a mutex.
    3311 */
struct VmaBlockVector
{
    // All sizing/lifetime parameters are fixed at construction and immutable afterwards.
    VmaBlockVector(
        VmaAllocator hAllocator,
        uint32_t memoryTypeIndex,
        VMA_BLOCK_VECTOR_TYPE blockVectorType,
        VkDeviceSize preferredBlockSize,
        size_t minBlockCount,
        size_t maxBlockCount,
        VkDeviceSize bufferImageGranularity,
        uint32_t frameInUseCount,
        bool isCustomPool);
    ~VmaBlockVector();

    // Pre-creates m_MinBlockCount blocks so the vector meets its minimum.
    VkResult CreateMinBlocks();

    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
    VMA_BLOCK_VECTOR_TYPE GetBlockVectorType() const { return m_BlockVectorType; }

    void GetPoolStats(VmaPoolStats* pStats);

    bool IsEmpty() const { return m_Blocks.empty(); }

    // Allocates from an existing block or a newly created one.
    VkResult Allocate(
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        const VkMemoryRequirements& vkMemReq,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    void Free(
        VmaAllocation hAllocation);

    // Adds statistics of this BlockVector to pStats.
    void AddStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    void UnmapPersistentlyMappedMemory();
    VkResult MapPersistentlyMappedMemory();

    void MakePoolAllocationsLost(
        uint32_t currentFrameIndex,
        size_t* pLostAllocationCount);

    // Lazily creates (and caches) the defragmentator for this vector.
    VmaDefragmentator* EnsureDefragmentator(
        VmaAllocator hAllocator,
        uint32_t currentFrameIndex);

    VkResult Defragment(
        VmaDefragmentationStats* pDefragmentationStats,
        VkDeviceSize& maxBytesToMove,
        uint32_t& maxAllocationsToMove);

    void DestroyDefragmentator();

private:
    friend class VmaDefragmentator;

    const VmaAllocator m_hAllocator;
    const uint32_t m_MemoryTypeIndex;
    const VMA_BLOCK_VECTOR_TYPE m_BlockVectorType;
    const VkDeviceSize m_PreferredBlockSize;
    const size_t m_MinBlockCount;
    const size_t m_MaxBlockCount;
    const VkDeviceSize m_BufferImageGranularity;
    const uint32_t m_FrameInUseCount;
    const bool m_IsCustomPool;
    VMA_MUTEX m_Mutex;
    // Incrementally sorted by sumFreeSize, ascending.
    VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
    /* There can be at most one allocation that is completely empty - a
    hysteresis to avoid pessimistic case of alternating creation and destruction
    of a VkDeviceMemory. */
    bool m_HasEmptyBlock;
    // Owned; null until EnsureDefragmentator() is called.
    VmaDefragmentator* m_pDefragmentator;

    // Finds and removes given block from vector.
    void Remove(VmaDeviceMemoryBlock* pBlock);

    // Performs single step in sorting m_Blocks. They may not be fully sorted
    // after this call.
    void IncrementallySortBlocks();

    VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
};
    3404 
struct VmaPool_T
{
public:
    // The pool's storage: a single block vector configured from VmaPoolCreateInfo.
    VmaBlockVector m_BlockVector;

    // Takes ownership.
    VmaPool_T(
        VmaAllocator hAllocator,
        const VmaPoolCreateInfo& createInfo);
    ~VmaPool_T();

    VmaBlockVector& GetBlockVector() { return m_BlockVector; }

#if VMA_STATS_STRING_ENABLED
    //void PrintDetailedMap(class VmaStringBuilder& sb);
#endif
};
    3422 
// Moves allocations between blocks of a single VmaBlockVector to reduce
// fragmentation. Collect candidates with AddAllocation(), then run Defragment().
class VmaDefragmentator
{
    const VmaAllocator m_hAllocator;
    // Block vector being defragmented; not owned.
    VmaBlockVector* const m_pBlockVector;
    uint32_t m_CurrentFrameIndex;
    VMA_BLOCK_VECTOR_TYPE m_BlockVectorType;
    // Totals accumulated by defragmentation rounds.
    VkDeviceSize m_BytesMoved;
    uint32_t m_AllocationsMoved;

    // One allocation registered for defragmentation, with an optional out-flag
    // (m_pChanged) owned by the caller.
    struct AllocationInfo
    {
        VmaAllocation m_hAllocation;
        VkBool32* m_pChanged;

        AllocationInfo() :
            m_hAllocation(VK_NULL_HANDLE),
            m_pChanged(VMA_NULL)
        {
        }
    };

    // Orders AllocationInfo by allocation size, largest first.
    struct AllocationInfoSizeGreater
    {
        bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
        {
            return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
        }
    };

    // Used between AddAllocation and Defragment.
    VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

    // Per-block working state used during a defragmentation pass.
    struct BlockInfo
    {
        VmaDeviceMemoryBlock* m_pBlock;
        bool m_HasNonMovableAllocations;
        VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

        BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
            m_pBlock(VMA_NULL),
            // Pessimistic default; recomputed by CalcHasNonMovableAllocations().
            m_HasNonMovableAllocations(true),
            m_Allocations(pAllocationCallbacks),
            m_pMappedDataForDefragmentation(VMA_NULL)
        {
        }

        // A block has non-movable allocations if it contains more allocations
        // than were registered for defragmentation in m_Allocations.
        void CalcHasNonMovableAllocations()
        {
            const size_t blockAllocCount = m_pBlock->m_Metadata.GetAllocationCount();
            const size_t defragmentAllocCount = m_Allocations.size();
            m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
        }

        // NOTE(review): name misspells "Descending"; kept as-is because renaming
        // would break callers elsewhere in the file.
        void SortAllocationsBySizeDescecnding()
        {
            VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
        }

        // Defined later in the file; EnsureMapping outputs the mapped pointer
        // through ppMappedData.
        VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
        void Unmap(VmaAllocator hAllocator);

    private:
        // Not null if mapped for defragmentation only, not persistently mapped.
        void* m_pMappedDataForDefragmentation;
    };

    // Orders BlockInfo* by the wrapped block's pointer value; the heterogeneous
    // overload allows binary search against a raw block pointer.
    struct BlockPointerLess
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlock;
        }
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
        }
    };

    // 1. Blocks with some non-movable allocations go first.
    // 2. Blocks with smaller sumFreeSize go first.
    struct BlockInfoCompareMoveDestination
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return true;
            }
            if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return false;
            }
            if(pLhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize())
            {
                return true;
            }
            return false;
        }
    };

    typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
    BlockInfoVector m_Blocks;

    // Performs one round of moves, bounded by the given limits.
    VkResult DefragmentRound(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);

    // Heuristic deciding whether a move from src to dst position is worthwhile.
    static bool MoveMakesSense(
        size_t dstBlockIndex, VkDeviceSize dstOffset,
        size_t srcBlockIndex, VkDeviceSize srcOffset);

public:
    VmaDefragmentator(
        VmaAllocator hAllocator,
        VmaBlockVector* pBlockVector,
        uint32_t currentFrameIndex);

    ~VmaDefragmentator();

    VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }

    // Registers an allocation as a move candidate. pChanged, if not null, is the
    // caller's flag associated with this allocation (see AllocationInfo).
    void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);

    VkResult Defragment(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);
};
    3551 
    3552 // Main allocator object.
struct VmaAllocator_T
{
    // True if internal synchronization with mutexes is enabled.
    bool m_UseMutex;
    // True if VK_KHR_dedicated_allocation may be used internally.
    bool m_UseKhrDedicatedAllocation;
    VkDevice m_hDevice;
    bool m_AllocationCallbacksSpecified;
    VkAllocationCallbacks m_AllocationCallbacks;
    VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
    // Non-zero when we are inside UnmapPersistentlyMappedMemory...MapPersistentlyMappedMemory.
    // Counter to allow nested calls to these functions.
    uint32_t m_UnmapPersistentlyMappedMemoryCounter;

    // Number of bytes free out of limit, or VK_WHOLE_SIZE if not limit for that heap.
    VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
    VMA_MUTEX m_HeapSizeLimitMutex;

    VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
    VkPhysicalDeviceMemoryProperties m_MemProps;

    // Default pools.
    VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES][VMA_BLOCK_VECTOR_TYPE_COUNT];

    // Each vector is sorted by memory (handle value).
    typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
    AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES][VMA_BLOCK_VECTOR_TYPE_COUNT];
    VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];

    VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
    ~VmaAllocator_T();

    // Returns user-provided callbacks, or null so Vulkan uses its defaults.
    const VkAllocationCallbacks* GetAllocationCallbacks() const
    {
        return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
    }
    const VmaVulkanFunctions& GetVulkanFunctions() const
    {
        return m_VulkanFunctions;
    }

    // Effective buffer/image granularity: the device limit, but never smaller
    // than the debug minimum.
    VkDeviceSize GetBufferImageGranularity() const
    {
        return VMA_MAX(
            static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
            m_PhysicalDeviceProperties.limits.bufferImageGranularity);
    }

    uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
    uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }

    uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
    {
        VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
        return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
    }

    // Queries memory requirements for hBuffer, also reporting whether a
    // dedicated allocation is required or preferred for it.
    void GetBufferMemoryRequirements(
        VkBuffer hBuffer,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;
    // Same as GetBufferMemoryRequirements, for an image.
    void GetImageMemoryRequirements(
        VkImage hImage,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;

    // Main allocation function.
    VkResult AllocateMemory(
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // Main deallocation function.
    void FreeMemory(const VmaAllocation allocation);

    void CalculateStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    void UnmapPersistentlyMappedMemory();
    VkResult MapPersistentlyMappedMemory();

    VkResult Defragment(
        VmaAllocation* pAllocations,
        size_t allocationCount,
        VkBool32* pAllocationsChanged,
        const VmaDefragmentationInfo* pDefragmentationInfo,
        VmaDefragmentationStats* pDefragmentationStats);

    void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);

    VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
    void DestroyPool(VmaPool pool);
    void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);

    void SetCurrentFrameIndex(uint32_t frameIndex);

    void MakePoolAllocationsLost(
        VmaPool hPool,
        size_t* pLostAllocationCount);

    void CreateLostAllocation(VmaAllocation* pAllocation);

    // Low-level wrappers over raw VkDeviceMemory allocation/freeing.
    VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
    void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);

private:
    VkDeviceSize m_PreferredLargeHeapBlockSize;
    VkDeviceSize m_PreferredSmallHeapBlockSize;

    VkPhysicalDevice m_PhysicalDevice;
    VMA_ATOMIC_UINT32 m_CurrentFrameIndex;

    VMA_MUTEX m_PoolsMutex;
    // Protected by m_PoolsMutex. Sorted by pointer value.
    VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;

    VmaVulkanFunctions m_VulkanFunctions;

    void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);

    VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);

    // Allocates memory of one specific Vulkan memory type (memTypeIndex).
    VkResult AllocateMemoryOfType(
        const VkMemoryRequirements& vkMemReq,
        bool dedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        uint32_t memTypeIndex,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // Allocates and registers new VkDeviceMemory specifically for single allocation.
    VkResult AllocateDedicatedMemory(
        VkDeviceSize size,
        VmaSuballocationType suballocType,
        uint32_t memTypeIndex,
        bool map,
        void* pUserData,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        VmaAllocation* pAllocation);

    // Unregisters and frees an allocation created as dedicated memory.
    // (Comment fixed: previous text referred to a nonexistent pMemory parameter
    // and a bool return; the function is void and takes the allocation handle.)
    void FreeDedicatedMemory(VmaAllocation allocation);
};
    3707 
    3709 // Memory allocation #2 after VmaAllocator_T definition
    3710 
// Allocates raw memory using the allocator's VkAllocationCallbacks
// (forwards to the callbacks-based VmaMalloc overload).
static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
{
    return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
}
    3715 
// Frees memory previously obtained from VmaMalloc(hAllocator, ...).
static void VmaFree(VmaAllocator hAllocator, void* ptr)
{
    VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
}
    3720 
    3721 template<typename T>
    3722 static T* VmaAllocate(VmaAllocator hAllocator)
    3723 {
    3724  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
    3725 }
    3726 
    3727 template<typename T>
    3728 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
    3729 {
    3730  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
    3731 }
    3732 
    3733 template<typename T>
    3734 static void vma_delete(VmaAllocator hAllocator, T* ptr)
    3735 {
    3736  if(ptr != VMA_NULL)
    3737  {
    3738  ptr->~T();
    3739  VmaFree(hAllocator, ptr);
    3740  }
    3741 }
    3742 
    3743 template<typename T>
    3744 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
    3745 {
    3746  if(ptr != VMA_NULL)
    3747  {
    3748  for(size_t i = count; i--; )
    3749  ptr[i].~T();
    3750  VmaFree(hAllocator, ptr);
    3751  }
    3752 }
    3753 
    3755 // VmaStringBuilder
    3756 
    3757 #if VMA_STATS_STRING_ENABLED
    3758 
// Simple append-only string buffer that allocates through the allocator's
// VkAllocationCallbacks.
class VmaStringBuilder
{
public:
    VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    size_t GetLength() const { return m_Data.size(); }
    // NOTE: the returned data is not NUL-terminated; pair with GetLength().
    const char* GetData() const { return m_Data.data(); }

    void Add(char ch) { m_Data.push_back(ch); }
    void Add(const char* pStr);
    void AddNewLine() { Add('\n'); }
    void AddNumber(uint32_t num);
    void AddNumber(uint64_t num);
    void AddPointer(const void* ptr);

private:
    // Growable character buffer (not NUL-terminated).
    VmaVector< char, VmaStlAllocator<char> > m_Data;
};
    3776 
    3777 void VmaStringBuilder::Add(const char* pStr)
    3778 {
    3779  const size_t strLen = strlen(pStr);
    3780  if(strLen > 0)
    3781  {
    3782  const size_t oldCount = m_Data.size();
    3783  m_Data.resize(oldCount + strLen);
    3784  memcpy(m_Data.data() + oldCount, pStr, strLen);
    3785  }
    3786 }
    3787 
    3788 void VmaStringBuilder::AddNumber(uint32_t num)
    3789 {
    3790  char buf[11];
    3791  VmaUint32ToStr(buf, sizeof(buf), num);
    3792  Add(buf);
    3793 }
    3794 
    3795 void VmaStringBuilder::AddNumber(uint64_t num)
    3796 {
    3797  char buf[21];
    3798  VmaUint64ToStr(buf, sizeof(buf), num);
    3799  Add(buf);
    3800 }
    3801 
    3802 void VmaStringBuilder::AddPointer(const void* ptr)
    3803 {
    3804  char buf[21];
    3805  VmaPtrToStr(buf, sizeof(buf), ptr);
    3806  Add(buf);
    3807 }
    3808 
    3809 #endif // #if VMA_STATS_STRING_ENABLED
    3810 
    3812 // VmaJsonWriter
    3813 
    3814 #if VMA_STATS_STRING_ENABLED
    3815 
// Streaming JSON emitter writing into a VmaStringBuilder. Collections are
// tracked on an explicit stack; objects enforce key/value alternation in
// BeginValue(). Strings started with BeginString() must end with EndString().
class VmaJsonWriter
{
public:
    VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    ~VmaJsonWriter();

    void BeginObject(bool singleLine = false);
    void EndObject();

    void BeginArray(bool singleLine = false);
    void EndArray();

    // Writes a complete string value; equivalent to BeginString(pStr) + EndString().
    void WriteString(const char* pStr);
    void BeginString(const char* pStr = VMA_NULL);
    void ContinueString(const char* pStr);
    void ContinueString(uint32_t n);
    void ContinueString(uint64_t n);
    void EndString(const char* pStr = VMA_NULL);

    void WriteNumber(uint32_t n);
    void WriteNumber(uint64_t n);
    void WriteBool(bool b);
    void WriteNull();

private:
    // Indentation unit written once per nesting level.
    static const char* const INDENT;

    enum COLLECTION_TYPE
    {
        COLLECTION_TYPE_OBJECT,
        COLLECTION_TYPE_ARRAY,
    };
    // One entry per currently open object/array.
    struct StackItem
    {
        COLLECTION_TYPE type;
        // Number of values emitted so far; for objects, even = key expected next.
        uint32_t valueCount;
        bool singleLineMode;
    };

    VmaStringBuilder& m_SB;
    VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    // True between BeginString() and EndString().
    bool m_InsideString;

    void BeginValue(bool isString);
    void WriteIndent(bool oneLess = false);
};
    3862 
// String appended once per nesting level by WriteIndent().
const char* const VmaJsonWriter::INDENT = " ";
    3864 
// Writes into the caller-provided string builder; does not own it.
VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
    m_SB(sb),
    m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    m_InsideString(false)
{
}
    3871 
VmaJsonWriter::~VmaJsonWriter()
{
    // Every string and collection must be closed before destruction.
    VMA_ASSERT(!m_InsideString);
    VMA_ASSERT(m_Stack.empty());
}
    3877 
    3878 void VmaJsonWriter::BeginObject(bool singleLine)
    3879 {
    3880  VMA_ASSERT(!m_InsideString);
    3881 
    3882  BeginValue(false);
    3883  m_SB.Add('{');
    3884 
    3885  StackItem item;
    3886  item.type = COLLECTION_TYPE_OBJECT;
    3887  item.valueCount = 0;
    3888  item.singleLineMode = singleLine;
    3889  m_Stack.push_back(item);
    3890 }
    3891 
    3892 void VmaJsonWriter::EndObject()
    3893 {
    3894  VMA_ASSERT(!m_InsideString);
    3895 
    3896  WriteIndent(true);
    3897  m_SB.Add('}');
    3898 
    3899  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
    3900  m_Stack.pop_back();
    3901 }
    3902 
    3903 void VmaJsonWriter::BeginArray(bool singleLine)
    3904 {
    3905  VMA_ASSERT(!m_InsideString);
    3906 
    3907  BeginValue(false);
    3908  m_SB.Add('[');
    3909 
    3910  StackItem item;
    3911  item.type = COLLECTION_TYPE_ARRAY;
    3912  item.valueCount = 0;
    3913  item.singleLineMode = singleLine;
    3914  m_Stack.push_back(item);
    3915 }
    3916 
    3917 void VmaJsonWriter::EndArray()
    3918 {
    3919  VMA_ASSERT(!m_InsideString);
    3920 
    3921  WriteIndent(true);
    3922  m_SB.Add(']');
    3923 
    3924  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
    3925  m_Stack.pop_back();
    3926 }
    3927 
// Writes a complete JSON string value in one call.
void VmaJsonWriter::WriteString(const char* pStr)
{
    BeginString(pStr);
    EndString();
}
    3933 
    3934 void VmaJsonWriter::BeginString(const char* pStr)
    3935 {
    3936  VMA_ASSERT(!m_InsideString);
    3937 
    3938  BeginValue(true);
    3939  m_SB.Add('"');
    3940  m_InsideString = true;
    3941  if(pStr != VMA_NULL && pStr[0] != '\0')
    3942  {
    3943  ContinueString(pStr);
    3944  }
    3945 }
    3946 
    3947 void VmaJsonWriter::ContinueString(const char* pStr)
    3948 {
    3949  VMA_ASSERT(m_InsideString);
    3950 
    3951  const size_t strLen = strlen(pStr);
    3952  for(size_t i = 0; i < strLen; ++i)
    3953  {
    3954  char ch = pStr[i];
    3955  if(ch == '\'')
    3956  {
    3957  m_SB.Add("\\\\");
    3958  }
    3959  else if(ch == '"')
    3960  {
    3961  m_SB.Add("\\\"");
    3962  }
    3963  else if(ch >= 32)
    3964  {
    3965  m_SB.Add(ch);
    3966  }
    3967  else switch(ch)
    3968  {
    3969  case '\n':
    3970  m_SB.Add("\\n");
    3971  break;
    3972  case '\r':
    3973  m_SB.Add("\\r");
    3974  break;
    3975  case '\t':
    3976  m_SB.Add("\\t");
    3977  break;
    3978  default:
    3979  VMA_ASSERT(0 && "Character not currently supported.");
    3980  break;
    3981  }
    3982  }
    3983 }
    3984 
// Appends a decimal number to the string value currently being written.
void VmaJsonWriter::ContinueString(uint32_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}
    3990 
// Appends a decimal number to the string value currently being written.
void VmaJsonWriter::ContinueString(uint64_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}
    3996 
    3997 void VmaJsonWriter::EndString(const char* pStr)
    3998 {
    3999  VMA_ASSERT(m_InsideString);
    4000  if(pStr != VMA_NULL && pStr[0] != '\0')
    4001  {
    4002  ContinueString(pStr);
    4003  }
    4004  m_SB.Add('"');
    4005  m_InsideString = false;
    4006 }
    4007 
// Writes an unsigned 32-bit number as a complete JSON value.
void VmaJsonWriter::WriteNumber(uint32_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}
    4014 
// Writes an unsigned 64-bit number as a complete JSON value.
void VmaJsonWriter::WriteNumber(uint64_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}
    4021 
    4022 void VmaJsonWriter::WriteBool(bool b)
    4023 {
    4024  VMA_ASSERT(!m_InsideString);
    4025  BeginValue(false);
    4026  m_SB.Add(b ? "true" : "false");
    4027 }
    4028 
// Writes the JSON "null" literal as a complete value.
void VmaJsonWriter::WriteNull()
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.Add("null");
}
    4035 
// Emits the separator/indentation that must precede a new value and enforces
// JSON structure: inside an object, values alternate key (string) / value.
void VmaJsonWriter::BeginValue(bool isString)
{
    if(!m_Stack.empty())
    {
        StackItem& currItem = m_Stack.back();
        // Even valueCount inside an object means a key comes next,
        // and JSON object keys must be strings.
        if(currItem.type == COLLECTION_TYPE_OBJECT &&
            currItem.valueCount % 2 == 0)
        {
            VMA_ASSERT(isString);
        }

        // Odd valueCount inside an object: this is the value following its key.
        if(currItem.type == COLLECTION_TYPE_OBJECT &&
            currItem.valueCount % 2 != 0)
        {
            m_SB.Add(": ");
        }
        else if(currItem.valueCount > 0)
        {
            // Subsequent array element or object key: comma-separate.
            m_SB.Add(", ");
            WriteIndent();
        }
        else
        {
            // First entry in the collection.
            WriteIndent();
        }
        ++currItem.valueCount;
    }
}
    4064 
    4065 void VmaJsonWriter::WriteIndent(bool oneLess)
    4066 {
    4067  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
    4068  {
    4069  m_SB.AddNewLine();
    4070 
    4071  size_t count = m_Stack.size();
    4072  if(count > 0 && oneLess)
    4073  {
    4074  --count;
    4075  }
    4076  for(size_t i = 0; i < count; ++i)
    4077  {
    4078  m_SB.Add(INDENT);
    4079  }
    4080  }
    4081 }
    4082 
    4083 #endif // #if VMA_STATS_STRING_ENABLED
    4084 
    4086 
    4087 VkDeviceSize VmaAllocation_T::GetOffset() const
    4088 {
    4089  switch(m_Type)
    4090  {
    4091  case ALLOCATION_TYPE_BLOCK:
    4092  return m_BlockAllocation.m_Offset;
    4093  case ALLOCATION_TYPE_DEDICATED:
    4094  return 0;
    4095  default:
    4096  VMA_ASSERT(0);
    4097  return 0;
    4098  }
    4099 }
    4100 
    4101 VkDeviceMemory VmaAllocation_T::GetMemory() const
    4102 {
    4103  switch(m_Type)
    4104  {
    4105  case ALLOCATION_TYPE_BLOCK:
    4106  return m_BlockAllocation.m_Block->m_hMemory;
    4107  case ALLOCATION_TYPE_DEDICATED:
    4108  return m_DedicatedAllocation.m_hMemory;
    4109  default:
    4110  VMA_ASSERT(0);
    4111  return VK_NULL_HANDLE;
    4112  }
    4113 }
    4114 
    4115 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
    4116 {
    4117  switch(m_Type)
    4118  {
    4119  case ALLOCATION_TYPE_BLOCK:
    4120  return m_BlockAllocation.m_Block->m_MemoryTypeIndex;
    4121  case ALLOCATION_TYPE_DEDICATED:
    4122  return m_DedicatedAllocation.m_MemoryTypeIndex;
    4123  default:
    4124  VMA_ASSERT(0);
    4125  return UINT32_MAX;
    4126  }
    4127 }
    4128 
    4129 VMA_BLOCK_VECTOR_TYPE VmaAllocation_T::GetBlockVectorType() const
    4130 {
    4131  switch(m_Type)
    4132  {
    4133  case ALLOCATION_TYPE_BLOCK:
    4134  return m_BlockAllocation.m_Block->m_BlockVectorType;
    4135  case ALLOCATION_TYPE_DEDICATED:
    4136  return (m_DedicatedAllocation.m_PersistentMap ? VMA_BLOCK_VECTOR_TYPE_MAPPED : VMA_BLOCK_VECTOR_TYPE_UNMAPPED);
    4137  default:
    4138  VMA_ASSERT(0);
    4139  return VMA_BLOCK_VECTOR_TYPE_COUNT;
    4140  }
    4141 }
    4142 
    4143 void* VmaAllocation_T::GetMappedData() const
    4144 {
    4145  switch(m_Type)
    4146  {
    4147  case ALLOCATION_TYPE_BLOCK:
    4148  if(m_BlockAllocation.m_Block->m_pMappedData != VMA_NULL)
    4149  {
    4150  return (char*)m_BlockAllocation.m_Block->m_pMappedData + m_BlockAllocation.m_Offset;
    4151  }
    4152  else
    4153  {
    4154  return VMA_NULL;
    4155  }
    4156  break;
    4157  case ALLOCATION_TYPE_DEDICATED:
    4158  return m_DedicatedAllocation.m_pMappedData;
    4159  default:
    4160  VMA_ASSERT(0);
    4161  return VMA_NULL;
    4162  }
    4163 }
    4164 
    4165 bool VmaAllocation_T::CanBecomeLost() const
    4166 {
    4167  switch(m_Type)
    4168  {
    4169  case ALLOCATION_TYPE_BLOCK:
    4170  return m_BlockAllocation.m_CanBecomeLost;
    4171  case ALLOCATION_TYPE_DEDICATED:
    4172  return false;
    4173  default:
    4174  VMA_ASSERT(0);
    4175  return false;
    4176  }
    4177 }
    4178 
// Returns the pool handle stored for this allocation.
// Valid only for block allocations (dedicated allocations have no pool).
VmaPool VmaAllocation_T::GetPool() const
{
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    return m_BlockAllocation.m_hPool;
}
    4184 
// Maps the whole memory range of a persistently-mapped dedicated allocation,
// storing the pointer in m_pMappedData. Returns VK_SUCCESS without mapping
// if the allocation is not persistently mapped.
VkResult VmaAllocation_T::DedicatedAllocMapPersistentlyMappedMemory(VmaAllocator hAllocator)
{
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
    if(m_DedicatedAllocation.m_PersistentMap)
    {
        return (*hAllocator->GetVulkanFunctions().vkMapMemory)(
            hAllocator->m_hDevice,
            m_DedicatedAllocation.m_hMemory,
            0, // offset
            VK_WHOLE_SIZE,
            0, // flags
            &m_DedicatedAllocation.m_pMappedData);
    }
    return VK_SUCCESS;
}
// Unmaps the dedicated allocation's memory if currently mapped and clears
// m_pMappedData. No-op when not mapped.
void VmaAllocation_T::DedicatedAllocUnmapPersistentlyMappedMemory(VmaAllocator hAllocator)
{
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
    if(m_DedicatedAllocation.m_pMappedData)
    {
        // Only persistently mapped dedicated allocations should have a mapped pointer here.
        VMA_ASSERT(m_DedicatedAllocation.m_PersistentMap);
        (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_DedicatedAllocation.m_hMemory);
        m_DedicatedAllocation.m_pMappedData = VMA_NULL;
    }
}
    4210 
    4211 
// Atomically marks this allocation as lost via a compare-and-swap loop on its
// last-use frame index. Returns true on success, false when the allocation is
// still potentially in use (within frameInUseCount frames of
// currentFrameIndex). Caller must guarantee CanBecomeLost() is true.
bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    VMA_ASSERT(CanBecomeLost());

    /*
    Warning: This is a carefully designed algorithm.
    Do not modify unless you really know what you're doing :)
    */
    uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
    // CAS retry loop: repeat until either we observe the allocation cannot be
    // lost, or we successfully swap in the LOST marker.
    for(;;)
    {
        if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
        {
            // Already lost - caller should never reach this state.
            VMA_ASSERT(0);
            return false;
        }
        else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
        {
            // Used too recently - may still be referenced by in-flight frames.
            return false;
        }
        else // Last use time earlier than current time.
        {
            if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
            {
                // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
                // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
                return true;
            }
            // CAS failed: localLastUseFrameIndex was refreshed - retry.
        }
    }
}
    4243 
    4244 #if VMA_STATS_STRING_ENABLED
    4245 
// Correspond to values of enum VmaSuballocationType.
// Used below (PrintDetailedMap) to label suballocations in JSON output.
static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    "FREE",
    "UNKNOWN",
    "BUFFER",
    "IMAGE_UNKNOWN",
    "IMAGE_LINEAR",
    "IMAGE_OPTIMAL",
};
    4255 
    4256 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
    4257 {
    4258  json.BeginObject();
    4259 
    4260  json.WriteString("Blocks");
    4261  json.WriteNumber(stat.blockCount);
    4262 
    4263  json.WriteString("Allocations");
    4264  json.WriteNumber(stat.allocationCount);
    4265 
    4266  json.WriteString("UnusedRanges");
    4267  json.WriteNumber(stat.unusedRangeCount);
    4268 
    4269  json.WriteString("UsedBytes");
    4270  json.WriteNumber(stat.usedBytes);
    4271 
    4272  json.WriteString("UnusedBytes");
    4273  json.WriteNumber(stat.unusedBytes);
    4274 
    4275  if(stat.allocationCount > 1)
    4276  {
    4277  json.WriteString("AllocationSize");
    4278  json.BeginObject(true);
    4279  json.WriteString("Min");
    4280  json.WriteNumber(stat.allocationSizeMin);
    4281  json.WriteString("Avg");
    4282  json.WriteNumber(stat.allocationSizeAvg);
    4283  json.WriteString("Max");
    4284  json.WriteNumber(stat.allocationSizeMax);
    4285  json.EndObject();
    4286  }
    4287 
    4288  if(stat.unusedRangeCount > 1)
    4289  {
    4290  json.WriteString("UnusedRangeSize");
    4291  json.BeginObject(true);
    4292  json.WriteString("Min");
    4293  json.WriteNumber(stat.unusedRangeSizeMin);
    4294  json.WriteString("Avg");
    4295  json.WriteNumber(stat.unusedRangeSizeAvg);
    4296  json.WriteString("Max");
    4297  json.WriteNumber(stat.unusedRangeSizeMax);
    4298  json.EndObject();
    4299  }
    4300 
    4301  json.EndObject();
    4302 }
    4303 
    4304 #endif // #if VMA_STATS_STRING_ENABLED
    4305 
// Comparator ordering suballocation-list iterators by suballocation size.
// Used to keep m_FreeSuballocationsBySize sorted and for binary search in it.
struct VmaSuballocationItemSizeLess
{
    // Compares sizes of two suballocations referenced by iterators.
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        const VmaSuballocationList::iterator rhs) const
    {
        return lhs->size < rhs->size;
    }
    // Compares a suballocation's size against a plain size value
    // (heterogeneous overload for binary search by requested size).
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        VkDeviceSize rhsSize) const
    {
        return lhs->size < rhsSize;
    }
};
    4321 
    4323 // class VmaBlockMetadata
    4324 
// Constructs empty metadata with zero size; containers use the allocator's
// allocation callbacks. Call Init() to set the block size and create the
// initial free suballocation.
VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
    m_Size(0),
    m_FreeCount(0),
    m_SumFreeSize(0),
    m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
{
}
    4333 
// Nothing to do explicitly - member containers release their storage themselves.
VmaBlockMetadata::~VmaBlockMetadata()
{
}
    4337 
    4338 void VmaBlockMetadata::Init(VkDeviceSize size)
    4339 {
    4340  m_Size = size;
    4341  m_FreeCount = 1;
    4342  m_SumFreeSize = size;
    4343 
    4344  VmaSuballocation suballoc = {};
    4345  suballoc.offset = 0;
    4346  suballoc.size = size;
    4347  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    4348  suballoc.hAllocation = VK_NULL_HANDLE;
    4349 
    4350  m_Suballocations.push_back(suballoc);
    4351  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
    4352  --suballocItem;
    4353  m_FreeSuballocationsBySize.push_back(suballocItem);
    4354 }
    4355 
    4356 bool VmaBlockMetadata::Validate() const
    4357 {
    4358  if(m_Suballocations.empty())
    4359  {
    4360  return false;
    4361  }
    4362 
    4363  // Expected offset of new suballocation as calculates from previous ones.
    4364  VkDeviceSize calculatedOffset = 0;
    4365  // Expected number of free suballocations as calculated from traversing their list.
    4366  uint32_t calculatedFreeCount = 0;
    4367  // Expected sum size of free suballocations as calculated from traversing their list.
    4368  VkDeviceSize calculatedSumFreeSize = 0;
    4369  // Expected number of free suballocations that should be registered in
    4370  // m_FreeSuballocationsBySize calculated from traversing their list.
    4371  size_t freeSuballocationsToRegister = 0;
    4372  // True if previous visisted suballocation was free.
    4373  bool prevFree = false;
    4374 
    4375  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    4376  suballocItem != m_Suballocations.cend();
    4377  ++suballocItem)
    4378  {
    4379  const VmaSuballocation& subAlloc = *suballocItem;
    4380 
    4381  // Actual offset of this suballocation doesn't match expected one.
    4382  if(subAlloc.offset != calculatedOffset)
    4383  {
    4384  return false;
    4385  }
    4386 
    4387  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
    4388  // Two adjacent free suballocations are invalid. They should be merged.
    4389  if(prevFree && currFree)
    4390  {
    4391  return false;
    4392  }
    4393  prevFree = currFree;
    4394 
    4395  if(currFree != (subAlloc.hAllocation == VK_NULL_HANDLE))
    4396  {
    4397  return false;
    4398  }
    4399 
    4400  if(currFree)
    4401  {
    4402  calculatedSumFreeSize += subAlloc.size;
    4403  ++calculatedFreeCount;
    4404  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    4405  {
    4406  ++freeSuballocationsToRegister;
    4407  }
    4408  }
    4409 
    4410  calculatedOffset += subAlloc.size;
    4411  }
    4412 
    4413  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    4414  // match expected one.
    4415  if(m_FreeSuballocationsBySize.size() != freeSuballocationsToRegister)
    4416  {
    4417  return false;
    4418  }
    4419 
    4420  VkDeviceSize lastSize = 0;
    4421  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    4422  {
    4423  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
    4424 
    4425  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
    4426  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
    4427  {
    4428  return false;
    4429  }
    4430  // They must be sorted by size ascending.
    4431  if(suballocItem->size < lastSize)
    4432  {
    4433  return false;
    4434  }
    4435 
    4436  lastSize = suballocItem->size;
    4437  }
    4438 
    4439  // Check if totals match calculacted values.
    4440  return
    4441  ValidateFreeSuballocationList() &&
    4442  (calculatedOffset == m_Size) &&
    4443  (calculatedSumFreeSize == m_SumFreeSize) &&
    4444  (calculatedFreeCount == m_FreeCount);
    4445 }
    4446 
    4447 VkDeviceSize VmaBlockMetadata::GetUnusedRangeSizeMax() const
    4448 {
    4449  if(!m_FreeSuballocationsBySize.empty())
    4450  {
    4451  return m_FreeSuballocationsBySize.back()->size;
    4452  }
    4453  else
    4454  {
    4455  return 0;
    4456  }
    4457 }
    4458 
    4459 bool VmaBlockMetadata::IsEmpty() const
    4460 {
    4461  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
    4462 }
    4463 
    4464 void VmaBlockMetadata::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    4465 {
    4466  outInfo.blockCount = 1;
    4467 
    4468  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    4469  outInfo.allocationCount = rangeCount - m_FreeCount;
    4470  outInfo.unusedRangeCount = m_FreeCount;
    4471 
    4472  outInfo.unusedBytes = m_SumFreeSize;
    4473  outInfo.usedBytes = m_Size - outInfo.unusedBytes;
    4474 
    4475  outInfo.allocationSizeMin = UINT64_MAX;
    4476  outInfo.allocationSizeMax = 0;
    4477  outInfo.unusedRangeSizeMin = UINT64_MAX;
    4478  outInfo.unusedRangeSizeMax = 0;
    4479 
    4480  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    4481  suballocItem != m_Suballocations.cend();
    4482  ++suballocItem)
    4483  {
    4484  const VmaSuballocation& suballoc = *suballocItem;
    4485  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    4486  {
    4487  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    4488  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
    4489  }
    4490  else
    4491  {
    4492  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
    4493  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
    4494  }
    4495  }
    4496 }
    4497 
    4498 void VmaBlockMetadata::AddPoolStats(VmaPoolStats& inoutStats) const
    4499 {
    4500  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    4501 
    4502  inoutStats.size += m_Size;
    4503  inoutStats.unusedSize += m_SumFreeSize;
    4504  inoutStats.allocationCount += rangeCount - m_FreeCount;
    4505  inoutStats.unusedRangeCount += m_FreeCount;
    4506  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    4507 }
    4508 
    4509 #if VMA_STATS_STRING_ENABLED
    4510 
    4511 void VmaBlockMetadata::PrintDetailedMap(class VmaJsonWriter& json) const
    4512 {
    4513  json.BeginObject();
    4514 
    4515  json.WriteString("TotalBytes");
    4516  json.WriteNumber(m_Size);
    4517 
    4518  json.WriteString("UnusedBytes");
    4519  json.WriteNumber(m_SumFreeSize);
    4520 
    4521  json.WriteString("Allocations");
    4522  json.WriteNumber(m_Suballocations.size() - m_FreeCount);
    4523 
    4524  json.WriteString("UnusedRanges");
    4525  json.WriteNumber(m_FreeCount);
    4526 
    4527  json.WriteString("Suballocations");
    4528  json.BeginArray();
    4529  size_t i = 0;
    4530  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    4531  suballocItem != m_Suballocations.cend();
    4532  ++suballocItem, ++i)
    4533  {
    4534  json.BeginObject(true);
    4535 
    4536  json.WriteString("Type");
    4537  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[suballocItem->type]);
    4538 
    4539  json.WriteString("Size");
    4540  json.WriteNumber(suballocItem->size);
    4541 
    4542  json.WriteString("Offset");
    4543  json.WriteNumber(suballocItem->offset);
    4544 
    4545  json.EndObject();
    4546  }
    4547  json.EndArray();
    4548 
    4549  json.EndObject();
    4550 }
    4551 
    4552 #endif // #if VMA_STATS_STRING_ENABLED
    4553 
    4554 /*
    4555 How many suitable free suballocations to analyze before choosing best one.
    4556 - Set to 1 to use First-Fit algorithm - first suitable free suballocation will
    4557  be chosen.
    4558 - Set to UINT32_MAX to use Best-Fit/Worst-Fit algorithm - all suitable free
 suballocations will be analyzed and best one will be chosen.
    4560 - Any other value is also acceptable.
    4561 */
    4562 //static const uint32_t MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK = 8;
    4563 
    4564 void VmaBlockMetadata::CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest)
    4565 {
    4566  VMA_ASSERT(IsEmpty());
    4567  pAllocationRequest->offset = 0;
    4568  pAllocationRequest->sumFreeSize = m_SumFreeSize;
    4569  pAllocationRequest->sumItemSize = 0;
    4570  pAllocationRequest->item = m_Suballocations.begin();
    4571  pAllocationRequest->itemsToMakeLostCount = 0;
    4572 }
    4573 
// Tries to find a place for an allocation of given size/alignment/type inside
// this block. On success returns true and fills *pAllocationRequest; the
// allocation itself is committed later by Alloc().
//
// When canMakeOtherLost is true, the request may additionally require marking
// existing allocations as lost first (itemsToMakeLostCount > 0); caller must
// then use MakeRequestedAllocationsLost() before Alloc().
bool VmaBlockMetadata::CreateAllocationRequest(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(pAllocationRequest != VMA_NULL);
    VMA_HEAVY_ASSERT(Validate());

    // There is not enough total free space in this block to fulfill the request: Early return.
    if(canMakeOtherLost == false && m_SumFreeSize < allocSize)
    {
        return false;
    }

    // New algorithm, efficiently searching freeSuballocationsBySize.
    const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
    if(freeSuballocCount > 0)
    {
        if(VMA_BEST_FIT)
        {
            // Find first free suballocation with size not less than allocSize.
            VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
                m_FreeSuballocationsBySize.data(),
                m_FreeSuballocationsBySize.data() + freeSuballocCount,
                allocSize,
                VmaSuballocationItemSizeLess());
            size_t index = it - m_FreeSuballocationsBySize.data();
            // Try candidates from smallest sufficient size upwards; alignment
            // or granularity constraints may disqualify the first ones.
            for(; index < freeSuballocCount; ++index)
            {
                if(CheckAllocation(
                    currentFrameIndex,
                    frameInUseCount,
                    bufferImageGranularity,
                    allocSize,
                    allocAlignment,
                    allocType,
                    m_FreeSuballocationsBySize[index],
                    false, // canMakeOtherLost
                    &pAllocationRequest->offset,
                    &pAllocationRequest->itemsToMakeLostCount,
                    &pAllocationRequest->sumFreeSize,
                    &pAllocationRequest->sumItemSize))
                {
                    pAllocationRequest->item = m_FreeSuballocationsBySize[index];
                    return true;
                }
            }
        }
        else
        {
            // Search starting from biggest suballocations.
            for(size_t index = freeSuballocCount; index--; )
            {
                if(CheckAllocation(
                    currentFrameIndex,
                    frameInUseCount,
                    bufferImageGranularity,
                    allocSize,
                    allocAlignment,
                    allocType,
                    m_FreeSuballocationsBySize[index],
                    false, // canMakeOtherLost
                    &pAllocationRequest->offset,
                    &pAllocationRequest->itemsToMakeLostCount,
                    &pAllocationRequest->sumFreeSize,
                    &pAllocationRequest->sumItemSize))
                {
                    pAllocationRequest->item = m_FreeSuballocationsBySize[index];
                    return true;
                }
            }
        }
    }

    if(canMakeOtherLost)
    {
        // Brute-force algorithm. TODO: Come up with something better.

        // Initialize sums to the sentinel VK_WHOLE_SIZE so any real candidate
        // wins the CalcCost comparison below, and so we can detect afterwards
        // whether any candidate was found at all.
        pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
        pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;

        VmaAllocationRequest tmpAllocRequest = {};
        // Consider every suballocation (free, or used-but-losable) as a
        // potential starting point and keep the cheapest feasible request.
        for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
            suballocIt != m_Suballocations.end();
            ++suballocIt)
        {
            if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
                suballocIt->hAllocation->CanBecomeLost())
            {
                if(CheckAllocation(
                    currentFrameIndex,
                    frameInUseCount,
                    bufferImageGranularity,
                    allocSize,
                    allocAlignment,
                    allocType,
                    suballocIt,
                    canMakeOtherLost,
                    &tmpAllocRequest.offset,
                    &tmpAllocRequest.itemsToMakeLostCount,
                    &tmpAllocRequest.sumFreeSize,
                    &tmpAllocRequest.sumItemSize))
                {
                    tmpAllocRequest.item = suballocIt;

                    if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
                    {
                        *pAllocationRequest = tmpAllocRequest;
                    }
                }
            }
        }

        // sumItemSize still at sentinel means no feasible candidate was found.
        if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
        {
            return true;
        }
    }

    return false;
}
    4702 
// Marks as lost the allocations a previously created request requires
// (pAllocationRequest->itemsToMakeLostCount of them, starting at
// pAllocationRequest->item), freeing their suballocations and advancing
// the request's item iterator accordingly.
//
// Returns true on success. Returns false if any of those allocations could
// not be made lost (see VmaAllocation_T::MakeLost); the request is then
// unusable.
bool VmaBlockMetadata::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    while(pAllocationRequest->itemsToMakeLostCount > 0)
    {
        // Step over a free suballocation to reach the next used one.
        // (Adjacent free ranges are merged - see Validate() - so one step suffices.)
        if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            ++pAllocationRequest->item;
        }
        VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
        VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
        VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
        if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            // FreeSuballocation returns an iterator to the resulting free range;
            // continue from there.
            pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
            --pAllocationRequest->itemsToMakeLostCount;
        }
        else
        {
            return false;
        }
    }

    VMA_HEAVY_ASSERT(Validate());
    VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);

    return true;
}
    4734 
// Marks as lost every allocation in this block that is allowed to become lost
// and whose MakeLost() succeeds for the given frame parameters.
// Returns the number of allocations made lost.
uint32_t VmaBlockMetadata::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    uint32_t lostAllocationCount = 0;
    for(VmaSuballocationList::iterator it = m_Suballocations.begin();
        it != m_Suballocations.end();
        ++it)
    {
        if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
            it->hAllocation->CanBecomeLost() &&
            it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            // FreeSuballocation invalidates `it` and returns an iterator to the
            // resulting free range; continue iteration from there.
            it = FreeSuballocation(it);
            ++lostAllocationCount;
        }
    }
    return lostAllocationCount;
}
    4752 
// Commits an allocation found earlier by CreateAllocationRequest() /
// CreateFirstAllocationRequest(): converts (part of) a free suballocation
// into a used one, inserts new free suballocations for any leftover space
// before and after it, and updates the cached free count and size.
void VmaBlockMetadata::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    VmaAllocation hAllocation)
{
    VMA_ASSERT(request.item != m_Suballocations.end());
    VmaSuballocation& suballoc = *request.item;
    // Given suballocation is a free block.
    VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    // Given offset is inside this suballocation.
    VMA_ASSERT(request.offset >= suballoc.offset);
    const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    // Free bytes left over past the end of the new allocation.
    const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;

    // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    // it to become used.
    UnregisterFreeSuballocation(request.item);

    suballoc.offset = request.offset;
    suballoc.size = allocSize;
    suballoc.type = type;
    suballoc.hAllocation = hAllocation;

    // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    if(paddingEnd)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset + allocSize;
        paddingSuballoc.size = paddingEnd;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        VmaSuballocationList::iterator next = request.item;
        ++next;
        const VmaSuballocationList::iterator paddingEndItem =
            m_Suballocations.insert(next, paddingSuballoc);
        RegisterFreeSuballocation(paddingEndItem);
    }

    // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    if(paddingBegin)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset - paddingBegin;
        paddingSuballoc.size = paddingBegin;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        const VmaSuballocationList::iterator paddingBeginItem =
            m_Suballocations.insert(request.item, paddingSuballoc);
        RegisterFreeSuballocation(paddingBeginItem);
    }

    // Update totals: the one consumed free suballocation may have been split
    // into up to two new free ranges.
    m_FreeCount = m_FreeCount - 1;
    if(paddingBegin > 0)
    {
        ++m_FreeCount;
    }
    if(paddingEnd > 0)
    {
        ++m_FreeCount;
    }
    m_SumFreeSize -= allocSize;
}
    4816 
    4817 void VmaBlockMetadata::Free(const VmaAllocation allocation)
    4818 {
    4819  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    4820  suballocItem != m_Suballocations.end();
    4821  ++suballocItem)
    4822  {
    4823  VmaSuballocation& suballoc = *suballocItem;
    4824  if(suballoc.hAllocation == allocation)
    4825  {
    4826  FreeSuballocation(suballocItem);
    4827  VMA_HEAVY_ASSERT(Validate());
    4828  return;
    4829  }
    4830  }
    4831  VMA_ASSERT(0 && "Not found!");
    4832 }
    4833 
    4834 bool VmaBlockMetadata::ValidateFreeSuballocationList() const
    4835 {
    4836  VkDeviceSize lastSize = 0;
    4837  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
    4838  {
    4839  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
    4840 
    4841  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
    4842  {
    4843  VMA_ASSERT(0);
    4844  return false;
    4845  }
    4846  if(it->size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    4847  {
    4848  VMA_ASSERT(0);
    4849  return false;
    4850  }
    4851  if(it->size < lastSize)
    4852  {
    4853  VMA_ASSERT(0);
    4854  return false;
    4855  }
    4856 
    4857  lastSize = it->size;
    4858  }
    4859  return true;
    4860 }
    4861 
/*
Checks whether an allocation of allocSize / allocAlignment / allocType can be
placed starting at suballocItem.

On success returns true and fills:
- *pOffset - final, aligned offset of the prospective allocation,
- *itemsToMakeLostCount - number of existing allocations that would have to be
  marked as lost first (stays 0 when canMakeOtherLost is false),
- *pSumFreeSize, *pSumItemSize - sizes of free space and of to-be-lost
  allocations involved, used by the caller to compare request costs.

When canMakeOtherLost is true, suballocItem may be a used (losable)
suballocation and the candidate region may span several consecutive
suballocations; otherwise suballocItem must be a single free suballocation
large enough by itself.
*/
bool VmaBlockMetadata::CheckAllocation(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    VmaSuballocationList::const_iterator suballocItem,
    bool canMakeOtherLost,
    VkDeviceSize* pOffset,
    size_t* itemsToMakeLostCount,
    VkDeviceSize* pSumFreeSize,
    VkDeviceSize* pSumItemSize) const
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(suballocItem != m_Suballocations.cend());
    VMA_ASSERT(pOffset != VMA_NULL);

    *itemsToMakeLostCount = 0;
    *pSumFreeSize = 0;
    *pSumItemSize = 0;

    if(canMakeOtherLost)
    {
        if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            *pSumFreeSize = suballocItem->size;
        }
        else
        {
            // Starting item is used: usable only if it can be made lost and
            // has not been used within frameInUseCount frames.
            if(suballocItem->hAllocation->CanBecomeLost() &&
                suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
            {
                ++*itemsToMakeLostCount;
                *pSumItemSize = suballocItem->size;
            }
            else
            {
                return false;
            }
        }

        // Remaining size is too small for this request: Early return.
        if(m_Size - suballocItem->offset < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballocItem->offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
        *pOffset = VmaAlignUp(*pOffset, alignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Now that we have final *pOffset, check if we are past suballocItem.
        // If yes, return false - this function should be called for another suballocItem as starting point.
        if(*pOffset >= suballocItem->offset + suballocItem->size)
        {
            return false;
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;

        // Calculate required margin at the end if this is not last suballocation.
        VmaSuballocationList::const_iterator next = suballocItem;
        ++next;
        const VkDeviceSize requiredEndMargin =
            (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;

        const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
        // Another early return check: request cannot fit in the block at all.
        if(suballocItem->offset + totalSize > m_Size)
        {
            return false;
        }

        // Advance lastSuballocItem until desired size is reached.
        // Update itemsToMakeLostCount.
        VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
        if(totalSize > suballocItem->size)
        {
            VkDeviceSize remainingSize = totalSize - suballocItem->size;
            while(remainingSize > 0)
            {
                ++lastSuballocItem;
                if(lastSuballocItem == m_Suballocations.cend())
                {
                    return false;
                }
                if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                {
                    *pSumFreeSize += lastSuballocItem->size;
                }
                else
                {
                    // Used item inside the candidate region: must also be losable.
                    VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
                    if(lastSuballocItem->hAllocation->CanBecomeLost() &&
                        lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                    {
                        ++*itemsToMakeLostCount;
                        *pSumItemSize += lastSuballocItem->size;
                    }
                    else
                    {
                        return false;
                    }
                }
                remainingSize = (lastSuballocItem->size < remainingSize) ?
                    remainingSize - lastSuballocItem->size : 0;
            }
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, we must mark more allocations lost or fail.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        // Conflicting neighbor must itself be losable, else fail.
                        VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
                        if(nextSuballoc.hAllocation->CanBecomeLost() &&
                            nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++*itemsToMakeLostCount;
                        }
                        else
                        {
                            return false;
                        }
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }
    else
    {
        // canMakeOtherLost == false: suballocItem must be a single free
        // suballocation large enough on its own.
        const VmaSuballocation& suballoc = *suballocItem;
        VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        *pSumFreeSize = suballoc.size;

        // Size of this suballocation is too small for this request: Early return.
        if(suballoc.size < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballoc.offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
        *pOffset = VmaAlignUp(*pOffset, alignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;

        // Calculate required margin at the end if this is not last suballocation.
        VmaSuballocationList::const_iterator next = suballocItem;
        ++next;
        const VkDeviceSize requiredEndMargin =
            (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;

        // Fail if requested size plus margin before and after is bigger than size of this suballocation.
        if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
        {
            return false;
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, allocation cannot be made here.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        return false;
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }

    // All tests passed: Success. pOffset is already filled.
    return true;
}
    5143 
    5144 void VmaBlockMetadata::MergeFreeWithNext(VmaSuballocationList::iterator item)
    5145 {
    5146  VMA_ASSERT(item != m_Suballocations.end());
    5147  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    5148 
    5149  VmaSuballocationList::iterator nextItem = item;
    5150  ++nextItem;
    5151  VMA_ASSERT(nextItem != m_Suballocations.end());
    5152  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
    5153 
    5154  item->size += nextItem->size;
    5155  --m_FreeCount;
    5156  m_Suballocations.erase(nextItem);
    5157 }
    5158 
// Marks the suballocation at suballocItem as free and coalesces it with
// adjacent free neighbors when possible. Updates m_FreeCount, m_SumFreeSize,
// and the size-sorted registry (via Register/UnregisterFreeSuballocation).
// Returns an iterator to the resulting (possibly merged) free suballocation.
VmaSuballocationList::iterator VmaBlockMetadata::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
{
    // Change this suballocation to be marked as free.
    VmaSuballocation& suballoc = *suballocItem;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    suballoc.hAllocation = VK_NULL_HANDLE;

    // Update totals.
    ++m_FreeCount;
    m_SumFreeSize += suballoc.size;

    // Merge with previous and/or next suballocation if it's also free.
    bool mergeWithNext = false;
    bool mergeWithPrev = false;

    VmaSuballocationList::iterator nextItem = suballocItem;
    ++nextItem;
    if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    {
        mergeWithNext = true;
    }

    VmaSuballocationList::iterator prevItem = suballocItem;
    if(suballocItem != m_Suballocations.begin())
    {
        --prevItem;
        if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            mergeWithPrev = true;
        }
    }

    // A neighbor being merged must be unregistered from the size-sorted
    // registry first, because its size entry becomes stale after merging.
    if(mergeWithNext)
    {
        UnregisterFreeSuballocation(nextItem);
        MergeFreeWithNext(suballocItem);
    }

    if(mergeWithPrev)
    {
        UnregisterFreeSuballocation(prevItem);
        MergeFreeWithNext(prevItem);
        // Re-register the merged range under its new, larger size.
        RegisterFreeSuballocation(prevItem);
        return prevItem;
    }
    else
    {
        RegisterFreeSuballocation(suballocItem);
        return suballocItem;
    }
}
    5210 
    5211 void VmaBlockMetadata::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
    5212 {
    5213  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    5214  VMA_ASSERT(item->size > 0);
    5215 
    5216  // You may want to enable this validation at the beginning or at the end of
    5217  // this function, depending on what do you want to check.
    5218  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    5219 
    5220  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    5221  {
    5222  if(m_FreeSuballocationsBySize.empty())
    5223  {
    5224  m_FreeSuballocationsBySize.push_back(item);
    5225  }
    5226  else
    5227  {
    5228  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
    5229  }
    5230  }
    5231 
    5232  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    5233 }
    5234 
    5235 
    5236 void VmaBlockMetadata::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
    5237 {
    5238  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    5239  VMA_ASSERT(item->size > 0);
    5240 
    5241  // You may want to enable this validation at the beginning or at the end of
    5242  // this function, depending on what do you want to check.
    5243  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    5244 
    5245  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    5246  {
    5247  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    5248  m_FreeSuballocationsBySize.data(),
    5249  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
    5250  item,
    5251  VmaSuballocationItemSizeLess());
    5252  for(size_t index = it - m_FreeSuballocationsBySize.data();
    5253  index < m_FreeSuballocationsBySize.size();
    5254  ++index)
    5255  {
    5256  if(m_FreeSuballocationsBySize[index] == item)
    5257  {
    5258  VmaVectorRemove(m_FreeSuballocationsBySize, index);
    5259  return;
    5260  }
    5261  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
    5262  }
    5263  VMA_ASSERT(0 && "Not found.");
    5264  }
    5265 
    5266  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    5267 }
    5268 
    5270 // class VmaDeviceMemoryBlock
    5271 
// Constructs an empty, uninitialized block. Init() must be called before use;
// sentinel values (UINT32_MAX, VMA_BLOCK_VECTOR_TYPE_COUNT, VK_NULL_HANDLE)
// mark the not-yet-initialized state.
VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    m_MemoryTypeIndex(UINT32_MAX),
    m_BlockVectorType(VMA_BLOCK_VECTOR_TYPE_COUNT),
    m_hMemory(VK_NULL_HANDLE),
    m_PersistentMap(false),
    m_pMappedData(VMA_NULL),
    m_Metadata(hAllocator)
{
}
    5281 
// Initializes this block with an already-allocated VkDeviceMemory.
// Must be called exactly once on a freshly constructed block (asserted).
// pMappedData may be VMA_NULL when the memory is not mapped.
void VmaDeviceMemoryBlock::Init(
    uint32_t newMemoryTypeIndex,
    VMA_BLOCK_VECTOR_TYPE newBlockVectorType,
    VkDeviceMemory newMemory,
    VkDeviceSize newSize,
    bool persistentMap,
    void* pMappedData)
{
    VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);

    m_MemoryTypeIndex = newMemoryTypeIndex;
    m_BlockVectorType = newBlockVectorType;
    m_hMemory = newMemory;
    m_PersistentMap = persistentMap;
    m_pMappedData = pMappedData;

    // Metadata tracks the suballocation state for the whole block size.
    m_Metadata.Init(newSize);
}
    5300 
// Unmaps (if mapped) and frees the block's VkDeviceMemory.
// The block must be empty: all suballocations freed beforehand.
void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
{
    // This is the most important assert in the entire library.
    // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
    VMA_ASSERT(m_Metadata.IsEmpty() && "Some allocations were not freed before destruction of this memory block!");

    VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    // Unmap before freeing, if the block is currently mapped.
    if(m_pMappedData != VMA_NULL)
    {
        (allocator->GetVulkanFunctions().vkUnmapMemory)(allocator->m_hDevice, m_hMemory);
        m_pMappedData = VMA_NULL;
    }

    // Return the VkDeviceMemory to the allocator.
    allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_Metadata.GetSize(), m_hMemory);
    m_hMemory = VK_NULL_HANDLE;
}
    5317 
    5318 bool VmaDeviceMemoryBlock::Validate() const
    5319 {
    5320  if((m_hMemory == VK_NULL_HANDLE) ||
    5321  (m_Metadata.GetSize() == 0))
    5322  {
    5323  return false;
    5324  }
    5325 
    5326  return m_Metadata.Validate();
    5327 }
    5328 
    5329 static void InitStatInfo(VmaStatInfo& outInfo)
    5330 {
    5331  memset(&outInfo, 0, sizeof(outInfo));
    5332  outInfo.allocationSizeMin = UINT64_MAX;
    5333  outInfo.unusedRangeSizeMin = UINT64_MAX;
    5334 }
    5335 
    5336 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
    5337 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
    5338 {
    5339  inoutInfo.blockCount += srcInfo.blockCount;
    5340  inoutInfo.allocationCount += srcInfo.allocationCount;
    5341  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
    5342  inoutInfo.usedBytes += srcInfo.usedBytes;
    5343  inoutInfo.unusedBytes += srcInfo.unusedBytes;
    5344  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
    5345  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
    5346  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
    5347  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
    5348 }
    5349 
    5350 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
    5351 {
    5352  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
    5353  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
    5354  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
    5355  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
    5356 }
    5357 
// Constructs a custom pool: translates VmaPoolCreateInfo into the underlying
// VmaBlockVector configuration.
VmaPool_T::VmaPool_T(
    VmaAllocator hAllocator,
    const VmaPoolCreateInfo& createInfo) :
    m_BlockVector(
        hAllocator,
        createInfo.memoryTypeIndex,
        // Pool-level persistent-map flag selects the block vector type.
        (createInfo.flags & VMA_POOL_CREATE_PERSISTENT_MAP_BIT) != 0 ?
            VMA_BLOCK_VECTOR_TYPE_MAPPED : VMA_BLOCK_VECTOR_TYPE_UNMAPPED,
        createInfo.blockSize,
        createInfo.minBlockCount,
        createInfo.maxBlockCount,
        // A granularity of 1 effectively disables buffer/image granularity checks.
        (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
        createInfo.frameInUseCount,
        true) // isCustomPool
{
}
    5374 
// Trivial destructor: m_BlockVector cleans itself up via its own destructor.
VmaPool_T::~VmaPool_T()
{
}
    5378 
    5379 #if VMA_STATS_STRING_ENABLED
    5380 
    5381 #endif // #if VMA_STATS_STRING_ENABLED
    5382 
// Constructs an empty block vector; no VkDeviceMemory is allocated here.
// Call CreateMinBlocks() to pre-allocate minBlockCount blocks.
// isCustomPool distinguishes user-created pools (fixed block size) from the
// allocator's default per-memory-type vectors.
VmaBlockVector::VmaBlockVector(
    VmaAllocator hAllocator,
    uint32_t memoryTypeIndex,
    VMA_BLOCK_VECTOR_TYPE blockVectorType,
    VkDeviceSize preferredBlockSize,
    size_t minBlockCount,
    size_t maxBlockCount,
    VkDeviceSize bufferImageGranularity,
    uint32_t frameInUseCount,
    bool isCustomPool) :
    m_hAllocator(hAllocator),
    m_MemoryTypeIndex(memoryTypeIndex),
    m_BlockVectorType(blockVectorType),
    m_PreferredBlockSize(preferredBlockSize),
    m_MinBlockCount(minBlockCount),
    m_MaxBlockCount(maxBlockCount),
    m_BufferImageGranularity(bufferImageGranularity),
    m_FrameInUseCount(frameInUseCount),
    m_IsCustomPool(isCustomPool),
    m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    m_HasEmptyBlock(false),
    m_pDefragmentator(VMA_NULL)
{
}
    5407 
    5408 VmaBlockVector::~VmaBlockVector()
    5409 {
    5410  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
    5411 
    5412  for(size_t i = m_Blocks.size(); i--; )
    5413  {
    5414  m_Blocks[i]->Destroy(m_hAllocator);
    5415  vma_delete(m_hAllocator, m_Blocks[i]);
    5416  }
    5417 }
    5418 
    5419 VkResult VmaBlockVector::CreateMinBlocks()
    5420 {
    5421  for(size_t i = 0; i < m_MinBlockCount; ++i)
    5422  {
    5423  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
    5424  if(res != VK_SUCCESS)
    5425  {
    5426  return res;
    5427  }
    5428  }
    5429  return VK_SUCCESS;
    5430 }
    5431 
    5432 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
    5433 {
    5434  pStats->size = 0;
    5435  pStats->unusedSize = 0;
    5436  pStats->allocationCount = 0;
    5437  pStats->unusedRangeCount = 0;
    5438  pStats->unusedRangeSizeMax = 0;
    5439 
    5440  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    5441 
    5442  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    5443  {
    5444  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    5445  VMA_ASSERT(pBlock);
    5446  VMA_HEAVY_ASSERT(pBlock->Validate());
    5447  pBlock->m_Metadata.AddPoolStats(*pStats);
    5448  }
    5449 }
    5450 
// Maximum number of retries in VmaBlockVector::Allocate() when allocating
// with VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT.
static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
    5452 
    5453 VkResult VmaBlockVector::Allocate(
    5454  VmaPool hCurrentPool,
    5455  uint32_t currentFrameIndex,
    5456  const VkMemoryRequirements& vkMemReq,
    5457  const VmaAllocationCreateInfo& createInfo,
    5458  VmaSuballocationType suballocType,
    5459  VmaAllocation* pAllocation)
    5460 {
    5461  // Validate flags.
    5462  if(createInfo.pool != VK_NULL_HANDLE &&
    5463  ((createInfo.flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0) != (m_BlockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED))
    5464  {
    5465  VMA_ASSERT(0 && "Usage of VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT must match VMA_POOL_CREATE_PERSISTENT_MAP_BIT.");
    5466  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    5467  }
    5468 
    5469  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    5470 
    5471  // 1. Search existing allocations. Try to allocate without making other allocations lost.
    5472  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    5473  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    5474  {
    5475  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    5476  VMA_ASSERT(pCurrBlock);
    5477  VmaAllocationRequest currRequest = {};
    5478  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
    5479  currentFrameIndex,
    5480  m_FrameInUseCount,
    5481  m_BufferImageGranularity,
    5482  vkMemReq.size,
    5483  vkMemReq.alignment,
    5484  suballocType,
    5485  false, // canMakeOtherLost
    5486  &currRequest))
    5487  {
    5488  // Allocate from pCurrBlock.
    5489  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
    5490 
    5491  // We no longer have an empty Allocation.
    5492  if(pCurrBlock->m_Metadata.IsEmpty())
    5493  {
    5494  m_HasEmptyBlock = false;
    5495  }
    5496 
    5497  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex);
    5498  pCurrBlock->m_Metadata.Alloc(currRequest, suballocType, vkMemReq.size, *pAllocation);
    5499  (*pAllocation)->InitBlockAllocation(
    5500  hCurrentPool,
    5501  pCurrBlock,
    5502  currRequest.offset,
    5503  vkMemReq.alignment,
    5504  vkMemReq.size,
    5505  suballocType,
    5506  createInfo.pUserData,
    5507  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    5508  VMA_HEAVY_ASSERT(pCurrBlock->Validate());
    5509  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
    5510  return VK_SUCCESS;
    5511  }
    5512  }
    5513 
    5514  const bool canCreateNewBlock =
    5515  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
    5516  (m_Blocks.size() < m_MaxBlockCount);
    5517 
    5518  // 2. Try to create new block.
    5519  if(canCreateNewBlock)
    5520  {
    5521  // 2.1. Start with full preferredBlockSize.
    5522  VkDeviceSize blockSize = m_PreferredBlockSize;
    5523  size_t newBlockIndex = 0;
    5524  VkResult res = CreateBlock(blockSize, &newBlockIndex);
    5525  // Allocating blocks of other sizes is allowed only in default pools.
    5526  // In custom pools block size is fixed.
    5527  if(res < 0 && m_IsCustomPool == false)
    5528  {
    5529  // 2.2. Try half the size.
    5530  blockSize /= 2;
    5531  if(blockSize >= vkMemReq.size)
    5532  {
    5533  res = CreateBlock(blockSize, &newBlockIndex);
    5534  if(res < 0)
    5535  {
    5536  // 2.3. Try quarter the size.
    5537  blockSize /= 2;
    5538  if(blockSize >= vkMemReq.size)
    5539  {
    5540  res = CreateBlock(blockSize, &newBlockIndex);
    5541  }
    5542  }
    5543  }
    5544  }
    5545  if(res == VK_SUCCESS)
    5546  {
    5547  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
    5548  VMA_ASSERT(pBlock->m_Metadata.GetSize() >= vkMemReq.size);
    5549 
    5550  // Allocate from pBlock. Because it is empty, dstAllocRequest can be trivially filled.
    5551  VmaAllocationRequest allocRequest;
    5552  pBlock->m_Metadata.CreateFirstAllocationRequest(&allocRequest);
    5553  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex);
    5554  pBlock->m_Metadata.Alloc(allocRequest, suballocType, vkMemReq.size, *pAllocation);
    5555  (*pAllocation)->InitBlockAllocation(
    5556  hCurrentPool,
    5557  pBlock,
    5558  allocRequest.offset,
    5559  vkMemReq.alignment,
    5560  vkMemReq.size,
    5561  suballocType,
    5562  createInfo.pUserData,
    5563  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    5564  VMA_HEAVY_ASSERT(pBlock->Validate());
    5565  VMA_DEBUG_LOG(" Created new allocation Size=%llu", allocInfo.allocationSize);
    5566 
    5567  return VK_SUCCESS;
    5568  }
    5569  }
    5570 
    5571  const bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
    5572 
    5573  // 3. Try to allocate from existing blocks with making other allocations lost.
    5574  if(canMakeOtherLost)
    5575  {
    5576  uint32_t tryIndex = 0;
    5577  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
    5578  {
    5579  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
    5580  VmaAllocationRequest bestRequest = {};
    5581  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
    5582 
    5583  // 1. Search existing allocations.
    5584  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    5585  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    5586  {
    5587  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    5588  VMA_ASSERT(pCurrBlock);
    5589  VmaAllocationRequest currRequest = {};
    5590  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
    5591  currentFrameIndex,
    5592  m_FrameInUseCount,
    5593  m_BufferImageGranularity,
    5594  vkMemReq.size,
    5595  vkMemReq.alignment,
    5596  suballocType,
    5597  canMakeOtherLost,
    5598  &currRequest))
    5599  {
    5600  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    5601  if(pBestRequestBlock == VMA_NULL ||
    5602  currRequestCost < bestRequestCost)
    5603  {
    5604  pBestRequestBlock = pCurrBlock;
    5605  bestRequest = currRequest;
    5606  bestRequestCost = currRequestCost;
    5607 
    5608  if(bestRequestCost == 0)
    5609  {
    5610  break;
    5611  }
    5612  }
    5613  }
    5614  }
    5615 
    5616  if(pBestRequestBlock != VMA_NULL)
    5617  {
    5618  if(pBestRequestBlock->m_Metadata.MakeRequestedAllocationsLost(
    5619  currentFrameIndex,
    5620  m_FrameInUseCount,
    5621  &bestRequest))
    5622  {
    5623  // We no longer have an empty Allocation.
    5624  if(pBestRequestBlock->m_Metadata.IsEmpty())
    5625  {
    5626  m_HasEmptyBlock = false;
    5627  }
    5628  // Allocate from this pBlock.
    5629  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex);
    5630  pBestRequestBlock->m_Metadata.Alloc(bestRequest, suballocType, vkMemReq.size, *pAllocation);
    5631  (*pAllocation)->InitBlockAllocation(
    5632  hCurrentPool,
    5633  pBestRequestBlock,
    5634  bestRequest.offset,
    5635  vkMemReq.alignment,
    5636  vkMemReq.size,
    5637  suballocType,
    5638  createInfo.pUserData,
    5639  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    5640  VMA_HEAVY_ASSERT(pBlock->Validate());
    5641  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
    5642  return VK_SUCCESS;
    5643  }
    5644  // else: Some allocations must have been touched while we are here. Next try.
    5645  }
    5646  else
    5647  {
    5648  // Could not find place in any of the blocks - break outer loop.
    5649  break;
    5650  }
    5651  }
    5652  /* Maximum number of tries exceeded - a very unlike event when many other
    5653  threads are simultaneously touching allocations making it impossible to make
    5654  lost at the same time as we try to allocate. */
    5655  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
    5656  {
    5657  return VK_ERROR_TOO_MANY_OBJECTS;
    5658  }
    5659  }
    5660 
    5661  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    5662 }
    5663 
    5664 void VmaBlockVector::Free(
    5665  VmaAllocation hAllocation)
    5666 {
    5667  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
    5668 
    5669  // Scope for lock.
    5670  {
    5671  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    5672 
    5673  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    5674 
    5675  pBlock->m_Metadata.Free(hAllocation);
    5676  VMA_HEAVY_ASSERT(pBlock->Validate());
    5677 
    5678  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", memTypeIndex);
    5679 
    5680  // pBlock became empty after this deallocation.
    5681  if(pBlock->m_Metadata.IsEmpty())
    5682  {
    5683  // Already has empty Allocation. We don't want to have two, so delete this one.
    5684  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
    5685  {
    5686  pBlockToDelete = pBlock;
    5687  Remove(pBlock);
    5688  }
    5689  // We now have first empty Allocation.
    5690  else
    5691  {
    5692  m_HasEmptyBlock = true;
    5693  }
    5694  }
    5695  // pBlock didn't become empty, but we have another empty block - find and free that one.
    5696  // (This is optional, heuristics.)
    5697  else if(m_HasEmptyBlock)
    5698  {
    5699  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
    5700  if(pLastBlock->m_Metadata.IsEmpty() && m_Blocks.size() > m_MinBlockCount)
    5701  {
    5702  pBlockToDelete = pLastBlock;
    5703  m_Blocks.pop_back();
    5704  m_HasEmptyBlock = false;
    5705  }
    5706  }
    5707 
    5708  IncrementallySortBlocks();
    5709  }
    5710 
    5711  // Destruction of a free Allocation. Deferred until this point, outside of mutex
    5712  // lock, for performance reason.
    5713  if(pBlockToDelete != VMA_NULL)
    5714  {
    5715  VMA_DEBUG_LOG(" Deleted empty allocation");
    5716  pBlockToDelete->Destroy(m_hAllocator);
    5717  vma_delete(m_hAllocator, pBlockToDelete);
    5718  }
    5719 }
    5720 
    5721 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
    5722 {
    5723  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    5724  {
    5725  if(m_Blocks[blockIndex] == pBlock)
    5726  {
    5727  VmaVectorRemove(m_Blocks, blockIndex);
    5728  return;
    5729  }
    5730  }
    5731  VMA_ASSERT(0);
    5732 }
    5733 
    5734 void VmaBlockVector::IncrementallySortBlocks()
    5735 {
    5736  // Bubble sort only until first swap.
    5737  for(size_t i = 1; i < m_Blocks.size(); ++i)
    5738  {
    5739  if(m_Blocks[i - 1]->m_Metadata.GetSumFreeSize() > m_Blocks[i]->m_Metadata.GetSumFreeSize())
    5740  {
    5741  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
    5742  return;
    5743  }
    5744  }
    5745 }
    5746 
// Allocates a new VkDeviceMemory of blockSize, maps it if this vector is of
// type VMA_BLOCK_VECTOR_TYPE_MAPPED, wraps it in a VmaDeviceMemoryBlock, and
// appends it to m_Blocks.
// On success, *pNewBlockIndex (if not null) receives the new block's index.
// Returns a negative VkResult and leaves state unchanged on failure.
VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
{
    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
    allocInfo.allocationSize = blockSize;
    VkDeviceMemory mem = VK_NULL_HANDLE;
    VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
    if(res < 0)
    {
        return res;
    }

    // New VkDeviceMemory successfully created.

    // Map memory if needed. Skipped while persistently mapped memory is
    // globally unmapped (m_UnmapPersistentlyMappedMemoryCounter != 0).
    void* pMappedData = VMA_NULL;
    const bool persistentMap = (m_BlockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED);
    if(persistentMap && m_hAllocator->m_UnmapPersistentlyMappedMemoryCounter == 0)
    {
        res = (*m_hAllocator->GetVulkanFunctions().vkMapMemory)(
            m_hAllocator->m_hDevice,
            mem,
            0,
            VK_WHOLE_SIZE,
            0,
            &pMappedData);
        if(res < 0)
        {
            // Mapping failed: release the just-allocated memory and bail out.
            VMA_DEBUG_LOG(" vkMapMemory FAILED");
            m_hAllocator->FreeVulkanMemory(m_MemoryTypeIndex, blockSize, mem);
            return res;
        }
    }

    // Create new Allocation for it.
    VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
    pBlock->Init(
        m_MemoryTypeIndex,
        (VMA_BLOCK_VECTOR_TYPE)m_BlockVectorType,
        mem,
        allocInfo.allocationSize,
        persistentMap,
        pMappedData);

    m_Blocks.push_back(pBlock);
    if(pNewBlockIndex != VMA_NULL)
    {
        *pNewBlockIndex = m_Blocks.size() - 1;
    }

    return VK_SUCCESS;
}
    5799 
    5800 #if VMA_STATS_STRING_ENABLED
    5801 
// Writes this block vector's state as a JSON object.
// Custom pools report their exact configuration (memory type, mapped flag,
// block size, min/max/current block counts, frameInUseCount); default pools
// report only the preferred block size. Then all blocks' detailed maps follow.
void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
{
    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    json.BeginObject();

    if(m_IsCustomPool)
    {
        json.WriteString("MemoryTypeIndex");
        json.WriteNumber(m_MemoryTypeIndex);

        // "Mapped" is emitted only when true.
        if(m_BlockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED)
        {
            json.WriteString("Mapped");
            json.WriteBool(true);
        }

        json.WriteString("BlockSize");
        json.WriteNumber(m_PreferredBlockSize);

        // Min/Max are emitted only when they constrain anything.
        json.WriteString("BlockCount");
        json.BeginObject(true);
        if(m_MinBlockCount > 0)
        {
            json.WriteString("Min");
            json.WriteNumber(m_MinBlockCount);
        }
        if(m_MaxBlockCount < SIZE_MAX)
        {
            json.WriteString("Max");
            json.WriteNumber(m_MaxBlockCount);
        }
        json.WriteString("Cur");
        json.WriteNumber(m_Blocks.size());
        json.EndObject();

        if(m_FrameInUseCount > 0)
        {
            json.WriteString("FrameInUseCount");
            json.WriteNumber(m_FrameInUseCount);
        }
    }
    else
    {
        json.WriteString("PreferredBlockSize");
        json.WriteNumber(m_PreferredBlockSize);
    }

    json.WriteString("Blocks");
    json.BeginArray();
    for(size_t i = 0; i < m_Blocks.size(); ++i)
    {
        m_Blocks[i]->m_Metadata.PrintDetailedMap(json);
    }
    json.EndArray();

    json.EndObject();
}
    5860 
    5861 #endif // #if VMA_STATS_STRING_ENABLED
    5862 
    5863 void VmaBlockVector::UnmapPersistentlyMappedMemory()
    5864 {
    5865  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    5866 
    5867  for(size_t i = m_Blocks.size(); i--; )
    5868  {
    5869  VmaDeviceMemoryBlock* pBlock = m_Blocks[i];
    5870  if(pBlock->m_pMappedData != VMA_NULL)
    5871  {
    5872  VMA_ASSERT(pBlock->m_PersistentMap != false);
    5873  (m_hAllocator->GetVulkanFunctions().vkUnmapMemory)(m_hAllocator->m_hDevice, pBlock->m_hMemory);
    5874  pBlock->m_pMappedData = VMA_NULL;
    5875  }
    5876  }
    5877 }
    5878 
    5879 VkResult VmaBlockVector::MapPersistentlyMappedMemory()
    5880 {
    5881  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    5882 
    5883  VkResult finalResult = VK_SUCCESS;
    5884  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
    5885  {
    5886  VmaDeviceMemoryBlock* pBlock = m_Blocks[i];
    5887  if(pBlock->m_PersistentMap)
    5888  {
    5889  VMA_ASSERT(pBlock->m_pMappedData == nullptr);
    5890  VkResult localResult = (*m_hAllocator->GetVulkanFunctions().vkMapMemory)(
    5891  m_hAllocator->m_hDevice,
    5892  pBlock->m_hMemory,
    5893  0,
    5894  VK_WHOLE_SIZE,
    5895  0,
    5896  &pBlock->m_pMappedData);
    5897  if(localResult != VK_SUCCESS)
    5898  {
    5899  finalResult = localResult;
    5900  }
    5901  }
    5902  }
    5903  return finalResult;
    5904 }
    5905 
    5906 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
    5907  VmaAllocator hAllocator,
    5908  uint32_t currentFrameIndex)
    5909 {
    5910  if(m_pDefragmentator == VMA_NULL)
    5911  {
    5912  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
    5913  hAllocator,
    5914  this,
    5915  currentFrameIndex);
    5916  }
    5917 
    5918  return m_pDefragmentator;
    5919 }
    5920 
// Runs defragmentation on this vector within the given budgets, accumulates
// results into *pDefragmentationStats (may be null), decrements the budgets
// by the amounts consumed, and destroys blocks that became empty (keeping at
// least m_MinBlockCount blocks).
// Requires EnsureDefragmentator() to have been called; otherwise a no-op.
VkResult VmaBlockVector::Defragment(
    VmaDefragmentationStats* pDefragmentationStats,
    VkDeviceSize& maxBytesToMove,
    uint32_t& maxAllocationsToMove)
{
    if(m_pDefragmentator == VMA_NULL)
    {
        return VK_SUCCESS;
    }

    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    // Defragment.
    VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);

    // Accumulate statistics.
    if(pDefragmentationStats != VMA_NULL)
    {
        const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
        const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
        pDefragmentationStats->bytesMoved += bytesMoved;
        pDefragmentationStats->allocationsMoved += allocationsMoved;
        VMA_ASSERT(bytesMoved <= maxBytesToMove);
        VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
        // Consume the budgets so the caller can spread them across vectors.
        maxBytesToMove -= bytesMoved;
        maxAllocationsToMove -= allocationsMoved;
    }

    // Free empty blocks. Iterate in reverse because VmaVectorRemove shifts
    // elements after the removed index.
    m_HasEmptyBlock = false;
    for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    {
        VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
        if(pBlock->m_Metadata.IsEmpty())
        {
            if(m_Blocks.size() > m_MinBlockCount)
            {
                if(pDefragmentationStats != VMA_NULL)
                {
                    ++pDefragmentationStats->deviceMemoryBlocksFreed;
                    pDefragmentationStats->bytesFreed += pBlock->m_Metadata.GetSize();
                }

                VmaVectorRemove(m_Blocks, blockIndex);
                pBlock->Destroy(m_hAllocator);
                vma_delete(m_hAllocator, pBlock);
            }
            else
            {
                // Must keep minimum block count: retain this empty block.
                m_HasEmptyBlock = true;
            }
        }
    }

    return result;
}
    5977 
    5978 void VmaBlockVector::DestroyDefragmentator()
    5979 {
    5980  if(m_pDefragmentator != VMA_NULL)
    5981  {
    5982  vma_delete(m_hAllocator, m_pDefragmentator);
    5983  m_pDefragmentator = VMA_NULL;
    5984  }
    5985 }
    5986 
// Makes allocations in all blocks of this vector lost, based on the current
// frame index and m_FrameInUseCount.
// NOTE(review): pLostAllocationCount is never written by this function, so
// callers cannot rely on it being set here - confirm the intended contract
// (presumably it should accumulate counts returned by MakeAllocationsLost).
void VmaBlockVector::MakePoolAllocationsLost(
    uint32_t currentFrameIndex,
    size_t* pLostAllocationCount)
{
    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    {
        VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
        VMA_ASSERT(pBlock);
        pBlock->m_Metadata.MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
    }
}
    6000 
    6001 void VmaBlockVector::AddStats(VmaStats* pStats)
    6002 {
    6003  const uint32_t memTypeIndex = m_MemoryTypeIndex;
    6004  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
    6005 
    6006  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    6007 
    6008  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    6009  {
    6010  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    6011  VMA_ASSERT(pBlock);
    6012  VMA_HEAVY_ASSERT(pBlock->Validate());
    6013  VmaStatInfo allocationStatInfo;
    6014  pBlock->m_Metadata.CalcAllocationStatInfo(allocationStatInfo);
    6015  VmaAddStatInfo(pStats->total, allocationStatInfo);
    6016  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    6017  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    6018  }
    6019 }
    6020 
    6022 // VmaDefragmentator members definition
    6023 
// Constructs a defragmentator bound to one block vector.
// Counters start at zero; allocation/block lists use the allocator's
// allocation callbacks. (Initializer order must match member declaration
// order, so it is kept exactly as-is.)
VmaDefragmentator::VmaDefragmentator(
    VmaAllocator hAllocator,
    VmaBlockVector* pBlockVector,
    uint32_t currentFrameIndex) :
    m_hAllocator(hAllocator),
    m_pBlockVector(pBlockVector),
    m_CurrentFrameIndex(currentFrameIndex),
    m_BytesMoved(0),
    m_AllocationsMoved(0),
    m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
    m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
{
}
    6037 
    6038 VmaDefragmentator::~VmaDefragmentator()
    6039 {
    6040  for(size_t i = m_Blocks.size(); i--; )
    6041  {
    6042  vma_delete(m_hAllocator, m_Blocks[i]);
    6043  }
    6044 }
    6045 
    6046 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
    6047 {
    6048  AllocationInfo allocInfo;
    6049  allocInfo.m_hAllocation = hAlloc;
    6050  allocInfo.m_pChanged = pChanged;
    6051  m_Allocations.push_back(allocInfo);
    6052 }
    6053 
// Returns a CPU pointer to this block's memory in *ppMappedData, mapping the
// memory on first use if necessary. Three cases, checked in order:
// already mapped by a previous call, persistently mapped by the block itself,
// or freshly mapped here (to be released later by Unmap()).
VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
{
    // It has already been mapped for defragmentation.
    if(m_pMappedDataForDefragmentation)
    {
        *ppMappedData = m_pMappedDataForDefragmentation;
        return VK_SUCCESS;
    }

    // It is persistently mapped.
    if(m_pBlock->m_PersistentMap)
    {
        VMA_ASSERT(m_pBlock->m_pMappedData != VMA_NULL);
        *ppMappedData = m_pBlock->m_pMappedData;
        return VK_SUCCESS;
    }

    // Map on first usage. The whole block is mapped so any suballocation
    // offset can be addressed.
    VkResult res = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
        hAllocator->m_hDevice,
        m_pBlock->m_hMemory,
        0,
        VK_WHOLE_SIZE,
        0,
        &m_pMappedDataForDefragmentation);
    *ppMappedData = m_pMappedDataForDefragmentation;
    return res;
}
    6082 
    6083 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
    6084 {
    6085  if(m_pMappedDataForDefragmentation != VMA_NULL)
    6086  {
    6087  (hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_pBlock->m_hMemory);
    6088  }
    6089 }
    6090 
// Performs one round of defragmentation: walks allocations from the most
// "source" block toward the most "destination" block and moves each one to
// the earliest block/offset where it fits, copying the data through mapped
// pointers. Returns VK_SUCCESS when all candidates were processed, or
// VK_INCOMPLETE when the maxBytesToMove/maxAllocationsToMove budget was hit.
// Requires m_Blocks to be sorted by BlockInfoCompareMoveDestination and each
// block's m_Allocations sorted by descending size (done by Defragment()).
VkResult VmaDefragmentator::DefragmentRound(
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    if(m_Blocks.empty())
    {
        return VK_SUCCESS;
    }

    // srcAllocIndex == SIZE_MAX is a sentinel meaning "not yet positioned
    // within the current block"; the while loop below resolves it.
    size_t srcBlockIndex = m_Blocks.size() - 1;
    size_t srcAllocIndex = SIZE_MAX;
    for(;;)
    {
        // 1. Find next allocation to move.
        // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
        // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
        while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
        {
            if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
            {
                // Finished: no more allocations to process.
                if(srcBlockIndex == 0)
                {
                    return VK_SUCCESS;
                }
                else
                {
                    --srcBlockIndex;
                    srcAllocIndex = SIZE_MAX;
                }
            }
            else
            {
                srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
            }
        }

        BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
        AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];

        const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
        const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
        const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
        const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();

        // 2. Try to find new place for this allocation in preceding or current block.
        for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
        {
            BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
            VmaAllocationRequest dstAllocRequest;
            if(pDstBlockInfo->m_pBlock->m_Metadata.CreateAllocationRequest(
                m_CurrentFrameIndex,
                m_pBlockVector->GetFrameInUseCount(),
                m_pBlockVector->GetBufferImageGranularity(),
                size,
                alignment,
                suballocType,
                false, // canMakeOtherLost
                &dstAllocRequest) &&
            MoveMakesSense(
                dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
            {
                VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);

                // Reached limit on number of allocations or bytes to move.
                if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
                    (m_BytesMoved + size > maxBytesToMove))
                {
                    return VK_INCOMPLETE;
                }

                // Both source and destination blocks must be mapped to copy.
                void* pDstMappedData = VMA_NULL;
                VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
                if(res != VK_SUCCESS)
                {
                    return res;
                }

                void* pSrcMappedData = VMA_NULL;
                res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
                if(res != VK_SUCCESS)
                {
                    return res;
                }

                // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
                memcpy(
                    reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
                    reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
                    static_cast<size_t>(size));

                // Commit the move in both blocks' metadata, then repoint
                // the allocation handle at its new block/offset.
                pDstBlockInfo->m_pBlock->m_Metadata.Alloc(dstAllocRequest, suballocType, size, allocInfo.m_hAllocation);
                pSrcBlockInfo->m_pBlock->m_Metadata.Free(allocInfo.m_hAllocation);

                allocInfo.m_hAllocation->ChangeBlockAllocation(pDstBlockInfo->m_pBlock, dstAllocRequest.offset);

                if(allocInfo.m_pChanged != VMA_NULL)
                {
                    *allocInfo.m_pChanged = VK_TRUE;
                }

                ++m_AllocationsMoved;
                m_BytesMoved += size;

                VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);

                break;
            }
        }

        // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.

        if(srcAllocIndex > 0)
        {
            --srcAllocIndex;
        }
        else
        {
            if(srcBlockIndex > 0)
            {
                --srcBlockIndex;
                srcAllocIndex = SIZE_MAX;
            }
            else
            {
                return VK_SUCCESS;
            }
        }
    }
}
    6221 
// Main entry point of the defragmentation algorithm. Organizes the
// allocations previously registered with AddAllocation() into per-block
// lists, sorts blocks and allocations, runs up to two DefragmentRound()
// passes within the given byte/count budget, then unmaps any memory that
// was mapped for the copies. Returns VK_SUCCESS, VK_INCOMPLETE (budget
// exhausted), or an error from mapping.
VkResult VmaDefragmentator::Defragment(
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    if(m_Allocations.empty())
    {
        return VK_SUCCESS;
    }

    // Create block info for each block.
    const size_t blockCount = m_pBlockVector->m_Blocks.size();
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
        pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
        m_Blocks.push_back(pBlockInfo);
    }

    // Sort them by m_pBlock pointer value, so allocations can be routed to
    // their block by binary search below.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());

    // Move allocation infos from m_Allocations to appropriate m_Blocks[memTypeIndex].m_Allocations.
    for(size_t blockIndex = 0, allocCount = m_Allocations.size(); blockIndex < allocCount; ++blockIndex)
    {
        AllocationInfo& allocInfo = m_Allocations[blockIndex];
        // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
        if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
        {
            VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
            BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
            if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
            {
                (*it)->m_Allocations.push_back(allocInfo);
            }
            else
            {
                // Allocation's block not found among this vector's blocks —
                // should be impossible.
                VMA_ASSERT(0);
            }
        }
    }
    m_Allocations.clear();

    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = m_Blocks[blockIndex];
        pBlockInfo->CalcHasNonMovableAllocations();
        pBlockInfo->SortAllocationsBySizeDescecnding();
    }

    // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());

    // Execute defragmentation rounds (the main part).
    VkResult result = VK_SUCCESS;
    for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
    {
        result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
    }

    // Unmap blocks that were mapped for defragmentation.
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        m_Blocks[blockIndex]->Unmap(m_hAllocator);
    }

    return result;
}
    6289 
    6290 bool VmaDefragmentator::MoveMakesSense(
    6291  size_t dstBlockIndex, VkDeviceSize dstOffset,
    6292  size_t srcBlockIndex, VkDeviceSize srcOffset)
    6293 {
    6294  if(dstBlockIndex < srcBlockIndex)
    6295  {
    6296  return true;
    6297  }
    6298  if(dstBlockIndex > srcBlockIndex)
    6299  {
    6300  return false;
    6301  }
    6302  if(dstOffset < srcOffset)
    6303  {
    6304  return true;
    6305  }
    6306  return false;
    6307 }
    6308 
    6310 // VmaAllocator_T
    6311 
// Constructs the allocator: captures creation flags, device handles and
// callbacks, imports Vulkan function pointers, queries physical-device and
// memory properties, applies optional per-heap size limits, and creates one
// default VmaBlockVector plus one dedicated-allocations list per
// (memory type, block vector type) pair.
VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
    m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
    m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
    m_PhysicalDevice(pCreateInfo->physicalDevice),
    m_hDevice(pCreateInfo->device),
    m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
    m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
        *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
    m_UnmapPersistentlyMappedMemoryCounter(0),
    m_PreferredLargeHeapBlockSize(0),
    m_PreferredSmallHeapBlockSize(0),
    m_CurrentFrameIndex(0),
    m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks()))
{
    VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);

    // Zero-initialize POD members before any early use.
    memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
    memset(&m_MemProps, 0, sizeof(m_MemProps));
    memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));

    memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
    memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));

    // VK_WHOLE_SIZE means "no limit" for a heap.
    for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    {
        m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
    }

    if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
    {
        m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
        m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
    }

    ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);

    (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
    (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);

    // 0 in create info means "use library default".
    m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
        pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
    m_PreferredSmallHeapBlockSize = (pCreateInfo->preferredSmallHeapBlockSize != 0) ?
        pCreateInfo->preferredSmallHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE);

    // Apply optional user-provided heap size limits, also clamping the
    // reported heap sizes so the rest of the library sees the limit.
    if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
    {
        for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
        {
            const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
            if(limit != VK_WHOLE_SIZE)
            {
                m_HeapSizeLimit[heapIndex] = limit;
                if(limit < m_MemProps.memoryHeaps[heapIndex].size)
                {
                    m_MemProps.memoryHeaps[heapIndex].size = limit;
                }
            }
        }
    }

    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);

        for(size_t blockVectorTypeIndex = 0; blockVectorTypeIndex < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorTypeIndex)
        {
            m_pBlockVectors[memTypeIndex][blockVectorTypeIndex] = vma_new(this, VmaBlockVector)(
                this,
                memTypeIndex,
                static_cast<VMA_BLOCK_VECTOR_TYPE>(blockVectorTypeIndex),
                preferredBlockSize,
                0,
                SIZE_MAX,
                GetBufferImageGranularity(),
                pCreateInfo->frameInUseCount,
                false); // isCustomPool
            // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
            // because minBlockCount is 0.
            m_pDedicatedAllocations[memTypeIndex][blockVectorTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
        }
    }
}
    6394 
// Destroys the allocator. All custom pools must have been destroyed by the
// user first. The per-(memory type, vector type) dedicated-allocation lists
// and block vectors are deleted in reverse order of creation.
VmaAllocator_T::~VmaAllocator_T()
{
    VMA_ASSERT(m_Pools.empty());

    for(size_t i = GetMemoryTypeCount(); i--; )
    {
        for(size_t j = VMA_BLOCK_VECTOR_TYPE_COUNT; j--; )
        {
            vma_delete(this, m_pDedicatedAllocations[i][j]);
            vma_delete(this, m_pBlockVectors[i][j]);
        }
    }
}
    6408 
// Fills m_VulkanFunctions: optionally from statically linked Vulkan symbols
// (VMA_STATIC_VULKAN_FUNCTIONS == 1), then overridden by any non-null
// pointers the user supplied in pVulkanFunctions. Asserts afterwards that
// every required function pointer is set.
void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
{
#if VMA_STATIC_VULKAN_FUNCTIONS == 1
    m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
    m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
    m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
    m_VulkanFunctions.vkMapMemory = &vkMapMemory;
    m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
    m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
    m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
    m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
    m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
    m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
    m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
    m_VulkanFunctions.vkCreateImage = &vkCreateImage;
    m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
    // Ignoring vkGetBufferMemoryRequirements2KHR.
    // Ignoring vkGetImageMemoryRequirements2KHR.
#endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1

// Copies one user-supplied function pointer if it is not null, so user
// overrides win over the statically imported ones above.
#define VMA_COPY_IF_NOT_NULL(funcName) \
    if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;

    if(pVulkanFunctions != VMA_NULL)
    {
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
        VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
        VMA_COPY_IF_NOT_NULL(vkFreeMemory);
        VMA_COPY_IF_NOT_NULL(vkMapMemory);
        VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
        VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
        VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
        VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
        VMA_COPY_IF_NOT_NULL(vkCreateImage);
        VMA_COPY_IF_NOT_NULL(vkDestroyImage);
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
    }

#undef VMA_COPY_IF_NOT_NULL

    // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
    // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
    // The KHR functions are only required when the dedicated-allocation
    // extension was enabled at allocator creation.
    if(m_UseKhrDedicatedAllocation)
    {
        VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
        VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
    }
}
    6477 
    6478 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
    6479 {
    6480  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    6481  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
    6482  return (heapSize <= VMA_SMALL_HEAP_MAX_SIZE) ?
    6483  m_PreferredSmallHeapBlockSize : m_PreferredLargeHeapBlockSize;
    6484 }
    6485 
    6486 VkResult VmaAllocator_T::AllocateMemoryOfType(
    6487  const VkMemoryRequirements& vkMemReq,
    6488  bool dedicatedAllocation,
    6489  VkBuffer dedicatedBuffer,
    6490  VkImage dedicatedImage,
    6491  const VmaAllocationCreateInfo& createInfo,
    6492  uint32_t memTypeIndex,
    6493  VmaSuballocationType suballocType,
    6494  VmaAllocation* pAllocation)
    6495 {
    6496  VMA_ASSERT(pAllocation != VMA_NULL);
    6497  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
    6498 
    6499  uint32_t blockVectorType = VmaAllocationCreateFlagsToBlockVectorType(createInfo.flags);
    6500  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex][blockVectorType];
    6501  VMA_ASSERT(blockVector);
    6502 
    6503  VmaAllocationCreateInfo finalCreateInfo = createInfo;
    6504 
    6505  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
    6506  bool preferDedicatedMemory =
    6507  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
    6508  dedicatedAllocation ||
    6509  // Heuristics: Allocate dedicated memory if requested size if greater than half of preferred block size.
    6510  vkMemReq.size > preferredBlockSize / 2;
    6511 
    6512  if(preferDedicatedMemory &&
    6513  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
    6514  finalCreateInfo.pool == VK_NULL_HANDLE)
    6515  {
    6517  }
    6518 
    6519  // If memory type is not HOST_VISIBLE, disable PERSISTENT_MAP.
    6520  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0 &&
    6521  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    6522  {
    6523  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT;
    6524  }
    6525 
    6526  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
    6527  {
    6528  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    6529  {
    6530  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    6531  }
    6532  else
    6533  {
    6534  return AllocateDedicatedMemory(
    6535  vkMemReq.size,
    6536  suballocType,
    6537  memTypeIndex,
    6538  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0,
    6539  finalCreateInfo.pUserData,
    6540  dedicatedBuffer,
    6541  dedicatedImage,
    6542  pAllocation);
    6543  }
    6544  }
    6545  else
    6546  {
    6547  VkResult res = blockVector->Allocate(
    6548  VK_NULL_HANDLE, // hCurrentPool
    6549  m_CurrentFrameIndex.load(),
    6550  vkMemReq,
    6551  finalCreateInfo,
    6552  suballocType,
    6553  pAllocation);
    6554  if(res == VK_SUCCESS)
    6555  {
    6556  return res;
    6557  }
    6558 
    6559  // 5. Try dedicated memory.
    6560  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    6561  {
    6562  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    6563  }
    6564  else
    6565  {
    6566  res = AllocateDedicatedMemory(
    6567  vkMemReq.size,
    6568  suballocType,
    6569  memTypeIndex,
    6570  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0,
    6571  finalCreateInfo.pUserData,
    6572  dedicatedBuffer,
    6573  dedicatedImage,
    6574  pAllocation);
    6575  if(res == VK_SUCCESS)
    6576  {
    6577  // Succeeded: AllocateDedicatedMemory function already filld pMemory, nothing more to do here.
    6578  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
    6579  return VK_SUCCESS;
    6580  }
    6581  else
    6582  {
    6583  // Everything failed: Return error code.
    6584  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    6585  return res;
    6586  }
    6587  }
    6588  }
    6589 }
    6590 
// Allocates a whole VkDeviceMemory object for a single resource. When the
// KHR dedicated-allocation extension is in use and a buffer/image handle is
// given, VkMemoryDedicatedAllocateInfoKHR is chained so the driver can tie
// the memory to that resource. Optionally maps the memory persistently.
// On success registers the new allocation in m_pDedicatedAllocations.
VkResult VmaAllocator_T::AllocateDedicatedMemory(
    VkDeviceSize size,
    VmaSuballocationType suballocType,
    uint32_t memTypeIndex,
    bool map,
    void* pUserData,
    VkBuffer dedicatedBuffer,
    VkImage dedicatedImage,
    VmaAllocation* pAllocation)
{
    VMA_ASSERT(pAllocation);

    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    allocInfo.memoryTypeIndex = memTypeIndex;
    allocInfo.allocationSize = size;

    // Must stay alive until AllocateVulkanMemory below — it is chained via pNext.
    VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
    if(m_UseKhrDedicatedAllocation)
    {
        if(dedicatedBuffer != VK_NULL_HANDLE)
        {
            // At most one of buffer/image may be specified.
            VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
            dedicatedAllocInfo.buffer = dedicatedBuffer;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
        else if(dedicatedImage != VK_NULL_HANDLE)
        {
            dedicatedAllocInfo.image = dedicatedImage;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
    }

    // Allocate VkDeviceMemory.
    VkDeviceMemory hMemory = VK_NULL_HANDLE;
    VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
    if(res < 0)
    {
        VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
        return res;
    }

    // Map immediately only if persistent mapping is requested and not
    // currently suspended by vmaUnmapPersistentlyMappedMemory().
    void* pMappedData = nullptr;
    if(map)
    {
        if(m_UnmapPersistentlyMappedMemoryCounter == 0)
        {
            res = (*m_VulkanFunctions.vkMapMemory)(
                m_hDevice,
                hMemory,
                0,
                VK_WHOLE_SIZE,
                0,
                &pMappedData);
            if(res < 0)
            {
                // Mapping failed: release the just-allocated memory before
                // returning the error.
                VMA_DEBUG_LOG(" vkMapMemory FAILED");
                FreeVulkanMemory(memTypeIndex, size, hMemory);
                return res;
            }
        }
    }

    *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load());
    (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, map, pMappedData, size, pUserData);

    // Register it in m_pDedicatedAllocations.
    {
        VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex][map ? VMA_BLOCK_VECTOR_TYPE_MAPPED : VMA_BLOCK_VECTOR_TYPE_UNMAPPED];
        VMA_ASSERT(pDedicatedAllocations);
        VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
    }

    VMA_DEBUG_LOG(" Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);

    return VK_SUCCESS;
}
    6668 
    6669 void VmaAllocator_T::GetBufferMemoryRequirements(
    6670  VkBuffer hBuffer,
    6671  VkMemoryRequirements& memReq,
    6672  bool& requiresDedicatedAllocation,
    6673  bool& prefersDedicatedAllocation) const
    6674 {
    6675  if(m_UseKhrDedicatedAllocation)
    6676  {
    6677  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
    6678  memReqInfo.buffer = hBuffer;
    6679 
    6680  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    6681 
    6682  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    6683  memReq2.pNext = &memDedicatedReq;
    6684 
    6685  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    6686 
    6687  memReq = memReq2.memoryRequirements;
    6688  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    6689  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    6690  }
    6691  else
    6692  {
    6693  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
    6694  requiresDedicatedAllocation = false;
    6695  prefersDedicatedAllocation = false;
    6696  }
    6697 }
    6698 
    6699 void VmaAllocator_T::GetImageMemoryRequirements(
    6700  VkImage hImage,
    6701  VkMemoryRequirements& memReq,
    6702  bool& requiresDedicatedAllocation,
    6703  bool& prefersDedicatedAllocation) const
    6704 {
    6705  if(m_UseKhrDedicatedAllocation)
    6706  {
    6707  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
    6708  memReqInfo.image = hImage;
    6709 
    6710  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    6711 
    6712  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    6713  memReq2.pNext = &memDedicatedReq;
    6714 
    6715  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    6716 
    6717  memReq = memReq2.memoryRequirements;
    6718  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    6719  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    6720  }
    6721  else
    6722  {
    6723  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
    6724  requiresDedicatedAllocation = false;
    6725  prefersDedicatedAllocation = false;
    6726  }
    6727 }
    6728 
// General allocation entry point. Validates mutually exclusive flag
// combinations, routes allocations for custom pools to the pool's block
// vector, and otherwise searches compatible memory types (per
// vkMemReq.memoryTypeBits), retrying with the next candidate type when an
// allocation from one type fails.
VkResult VmaAllocator_T::AllocateMemory(
    const VkMemoryRequirements& vkMemReq,
    bool requiresDedicatedAllocation,
    bool prefersDedicatedAllocation,
    VkBuffer dedicatedBuffer,
    VkImage dedicatedImage,
    const VmaAllocationCreateInfo& createInfo,
    VmaSuballocationType suballocType,
    VmaAllocation* pAllocation)
{
    // Reject contradictory or unsupported flag/pool combinations up front.
    if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
        (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    {
        VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
        return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    }
    if(requiresDedicatedAllocation)
    {
        if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
        {
            VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
            return VK_ERROR_OUT_OF_DEVICE_MEMORY;
        }
        if(createInfo.pool != VK_NULL_HANDLE)
        {
            VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
            return VK_ERROR_OUT_OF_DEVICE_MEMORY;
        }
    }
    if((createInfo.pool != VK_NULL_HANDLE) &&
        ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
    {
        VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
        return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    }

    if(createInfo.pool != VK_NULL_HANDLE)
    {
        // Custom pool: its block vector already has a fixed memory type.
        return createInfo.pool->m_BlockVector.Allocate(
            createInfo.pool,
            m_CurrentFrameIndex.load(),
            vkMemReq,
            createInfo,
            suballocType,
            pAllocation);
    }
    else
    {
        // Bit mask of memory Vulkan types acceptable for this allocation.
        uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
        uint32_t memTypeIndex = UINT32_MAX;
        VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
        if(res == VK_SUCCESS)
        {
            res = AllocateMemoryOfType(
                vkMemReq,
                requiresDedicatedAllocation || prefersDedicatedAllocation,
                dedicatedBuffer,
                dedicatedImage,
                createInfo,
                memTypeIndex,
                suballocType,
                pAllocation);
            // Succeeded on first try.
            if(res == VK_SUCCESS)
            {
                return res;
            }
            // Allocation from this memory type failed. Try other compatible memory types.
            else
            {
                for(;;)
                {
                    // Remove old memTypeIndex from list of possibilities.
                    memoryTypeBits &= ~(1u << memTypeIndex);
                    // Find alternative memTypeIndex.
                    res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
                    if(res == VK_SUCCESS)
                    {
                        res = AllocateMemoryOfType(
                            vkMemReq,
                            requiresDedicatedAllocation || prefersDedicatedAllocation,
                            dedicatedBuffer,
                            dedicatedImage,
                            createInfo,
                            memTypeIndex,
                            suballocType,
                            pAllocation);
                        // Allocation from this alternative memory type succeeded.
                        if(res == VK_SUCCESS)
                        {
                            return res;
                        }
                        // else: Allocation from this memory type failed. Try next one - next loop iteration.
                    }
                    // No other matching memory type index could be found.
                    else
                    {
                        // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
                        return VK_ERROR_OUT_OF_DEVICE_MEMORY;
                    }
                }
            }
        }
        // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
        else
            return res;
    }
}
    6838 
    6839 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
    6840 {
    6841  VMA_ASSERT(allocation);
    6842 
    6843  if(allocation->CanBecomeLost() == false ||
    6844  allocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
    6845  {
    6846  switch(allocation->GetType())
    6847  {
    6848  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    6849  {
    6850  VmaBlockVector* pBlockVector = VMA_NULL;
    6851  VmaPool hPool = allocation->GetPool();
    6852  if(hPool != VK_NULL_HANDLE)
    6853  {
    6854  pBlockVector = &hPool->m_BlockVector;
    6855  }
    6856  else
    6857  {
    6858  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    6859  const VMA_BLOCK_VECTOR_TYPE blockVectorType = allocation->GetBlockVectorType();
    6860  pBlockVector = m_pBlockVectors[memTypeIndex][blockVectorType];
    6861  }
    6862  pBlockVector->Free(allocation);
    6863  }
    6864  break;
    6865  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    6866  FreeDedicatedMemory(allocation);
    6867  break;
    6868  default:
    6869  VMA_ASSERT(0);
    6870  }
    6871  }
    6872 
    6873  vma_delete(this, allocation);
    6874 }
    6875 
    6876 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
    6877 {
    6878  // Initialize.
    6879  InitStatInfo(pStats->total);
    6880  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
    6881  InitStatInfo(pStats->memoryType[i]);
    6882  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    6883  InitStatInfo(pStats->memoryHeap[i]);
    6884 
    6885  // Process default pools.
    6886  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    6887  {
    6888  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    6889  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
    6890  {
    6891  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex][blockVectorType];
    6892  VMA_ASSERT(pBlockVector);
    6893  pBlockVector->AddStats(pStats);
    6894  }
    6895  }
    6896 
    6897  // Process custom pools.
    6898  {
    6899  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    6900  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    6901  {
    6902  m_Pools[poolIndex]->GetBlockVector().AddStats(pStats);
    6903  }
    6904  }
    6905 
    6906  // Process dedicated allocations.
    6907  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    6908  {
    6909  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    6910  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    6911  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
    6912  {
    6913  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex][blockVectorType];
    6914  VMA_ASSERT(pDedicatedAllocVector);
    6915  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
    6916  {
    6917  VmaStatInfo allocationStatInfo;
    6918  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
    6919  VmaAddStatInfo(pStats->total, allocationStatInfo);
    6920  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    6921  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    6922  }
    6923  }
    6924  }
    6925 
    6926  // Postprocess.
    6927  VmaPostprocessCalcStatInfo(pStats->total);
    6928  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
    6929  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
    6930  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
    6931  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
    6932 }
    6933 
// PCI vendor ID of AMD: 4098 == 0x1002. Used below to gate the
// unmap/remap-persistently-mapped-memory workaround to AMD GPUs only.
static const uint32_t VMA_VENDOR_ID_AMD = 4098;
    6935 
// Temporarily unmaps all persistently mapped memory. Calls nest: a counter
// ensures only the outermost call does real work; each call must be balanced
// by MapPersistentlyMappedMemory().
// The work is gated on vendorID == AMD and touches only memory types that are
// both HOST_VISIBLE and DEVICE_LOCAL - presumably a driver-specific
// workaround; NOTE(review): confirm rationale against the VMA changelog.
void VmaAllocator_T::UnmapPersistentlyMappedMemory()
{
    if(m_UnmapPersistentlyMappedMemoryCounter++ == 0)
    {
        if(m_PhysicalDeviceProperties.vendorID == VMA_VENDOR_ID_AMD)
        {
            // Iterate memory types in reverse (mirror of the remap path).
            for(uint32_t memTypeIndex = m_MemProps.memoryTypeCount; memTypeIndex--; )
            {
                const VkMemoryPropertyFlags memFlags = m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
                if((memFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0 &&
                    (memFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
                {
                    // Process DedicatedAllocations (only the MAPPED vector).
                    {
                        VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
                        AllocationVectorType* pDedicatedAllocationsVector = m_pDedicatedAllocations[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
                        for(size_t dedicatedAllocIndex = pDedicatedAllocationsVector->size(); dedicatedAllocIndex--; )
                        {
                            VmaAllocation hAlloc = (*pDedicatedAllocationsVector)[dedicatedAllocIndex];
                            hAlloc->DedicatedAllocUnmapPersistentlyMappedMemory(this);
                        }
                    }

                    // Process normal Allocations in the default block vectors.
                    {
                        VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
                        pBlockVector->UnmapPersistentlyMappedMemory();
                    }
                }
            }

            // Process custom pools (under the pools mutex).
            {
                VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
                for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
                {
                    m_Pools[poolIndex]->GetBlockVector().UnmapPersistentlyMappedMemory();
                }
            }
        }
    }
}
    6978 
// Counterpart of UnmapPersistentlyMappedMemory(): re-maps memory once the
// nesting counter drops back to zero. Returns VK_SUCCESS, or the last error
// produced by a block vector's re-map attempt (it keeps going on failure).
VkResult VmaAllocator_T::MapPersistentlyMappedMemory()
{
    VMA_ASSERT(m_UnmapPersistentlyMappedMemoryCounter > 0);
    if(--m_UnmapPersistentlyMappedMemoryCounter == 0)
    {
        VkResult finalResult = VK_SUCCESS;
        // Same AMD-only gating as the unmap path (see VMA_VENDOR_ID_AMD).
        if(m_PhysicalDeviceProperties.vendorID == VMA_VENDOR_ID_AMD)
        {
            // Process custom pools first (reverse order of the unmap path).
            {
                VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
                for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
                {
                    m_Pools[poolIndex]->GetBlockVector().MapPersistentlyMappedMemory();
                }
            }

            for(uint32_t memTypeIndex = 0; memTypeIndex < m_MemProps.memoryTypeCount; ++memTypeIndex)
            {
                const VkMemoryPropertyFlags memFlags = m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
                // Only HOST_VISIBLE + DEVICE_LOCAL types were unmapped.
                if((memFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0 &&
                    (memFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
                {
                    // Process DedicatedAllocations.
                    {
                        VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
                        AllocationVectorType* pAllocationsVector = m_pDedicatedAllocations[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
                        for(size_t dedicatedAllocIndex = 0, dedicatedAllocCount = pAllocationsVector->size(); dedicatedAllocIndex < dedicatedAllocCount; ++dedicatedAllocIndex)
                        {
                            VmaAllocation hAlloc = (*pAllocationsVector)[dedicatedAllocIndex];
                            hAlloc->DedicatedAllocMapPersistentlyMappedMemory(this);
                        }
                    }

                    // Process normal Allocations.
                    {
                        VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
                        VkResult localResult = pBlockVector->MapPersistentlyMappedMemory();
                        if(localResult != VK_SUCCESS)
                        {
                            // Remember the failure but continue mapping the rest.
                            finalResult = localResult;
                        }
                    }
                }
            }
        }
        return finalResult;
    }
    else
    {
        // Still nested inside an outer unmap/map pair - nothing to do yet.
        return VK_SUCCESS;
    }
}
    7030 
    7031 VkResult VmaAllocator_T::Defragment(
    7032  VmaAllocation* pAllocations,
    7033  size_t allocationCount,
    7034  VkBool32* pAllocationsChanged,
    7035  const VmaDefragmentationInfo* pDefragmentationInfo,
    7036  VmaDefragmentationStats* pDefragmentationStats)
    7037 {
    7038  if(pAllocationsChanged != VMA_NULL)
    7039  {
    7040  memset(pAllocationsChanged, 0, sizeof(*pAllocationsChanged));
    7041  }
    7042  if(pDefragmentationStats != VMA_NULL)
    7043  {
    7044  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
    7045  }
    7046 
    7047  if(m_UnmapPersistentlyMappedMemoryCounter > 0)
    7048  {
    7049  VMA_DEBUG_LOG("ERROR: Cannot defragment when inside vmaUnmapPersistentlyMappedMemory.");
    7050  return VK_ERROR_MEMORY_MAP_FAILED;
    7051  }
    7052 
    7053  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
    7054 
    7055  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
    7056 
    7057  const size_t poolCount = m_Pools.size();
    7058 
    7059  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
    7060  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    7061  {
    7062  VmaAllocation hAlloc = pAllocations[allocIndex];
    7063  VMA_ASSERT(hAlloc);
    7064  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
    7065  // DedicatedAlloc cannot be defragmented.
    7066  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
    7067  // Only HOST_VISIBLE memory types can be defragmented.
    7068  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) &&
    7069  // Lost allocation cannot be defragmented.
    7070  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
    7071  {
    7072  VmaBlockVector* pAllocBlockVector = nullptr;
    7073 
    7074  const VmaPool hAllocPool = hAlloc->GetPool();
    7075  // This allocation belongs to custom pool.
    7076  if(hAllocPool != VK_NULL_HANDLE)
    7077  {
    7078  pAllocBlockVector = &hAllocPool->GetBlockVector();
    7079  }
    7080  // This allocation belongs to general pool.
    7081  else
    7082  {
    7083  pAllocBlockVector = m_pBlockVectors[memTypeIndex][hAlloc->GetBlockVectorType()];
    7084  }
    7085 
    7086  VmaDefragmentator* const pDefragmentator = pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
    7087 
    7088  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
    7089  &pAllocationsChanged[allocIndex] : VMA_NULL;
    7090  pDefragmentator->AddAllocation(hAlloc, pChanged);
    7091  }
    7092  }
    7093 
    7094  VkResult result = VK_SUCCESS;
    7095 
    7096  // ======== Main processing.
    7097 
    7098  VkDeviceSize maxBytesToMove = SIZE_MAX;
    7099  uint32_t maxAllocationsToMove = UINT32_MAX;
    7100  if(pDefragmentationInfo != VMA_NULL)
    7101  {
    7102  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
    7103  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
    7104  }
    7105 
    7106  // Process standard memory.
    7107  for(uint32_t memTypeIndex = 0;
    7108  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
    7109  ++memTypeIndex)
    7110  {
    7111  // Only HOST_VISIBLE memory types can be defragmented.
    7112  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    7113  {
    7114  for(uint32_t blockVectorType = 0;
    7115  (blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT) && (result == VK_SUCCESS);
    7116  ++blockVectorType)
    7117  {
    7118  result = m_pBlockVectors[memTypeIndex][blockVectorType]->Defragment(
    7119  pDefragmentationStats,
    7120  maxBytesToMove,
    7121  maxAllocationsToMove);
    7122  }
    7123  }
    7124  }
    7125 
    7126  // Process custom pools.
    7127  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
    7128  {
    7129  result = m_Pools[poolIndex]->GetBlockVector().Defragment(
    7130  pDefragmentationStats,
    7131  maxBytesToMove,
    7132  maxAllocationsToMove);
    7133  }
    7134 
    7135  // ======== Destroy defragmentators.
    7136 
    7137  // Process custom pools.
    7138  for(size_t poolIndex = poolCount; poolIndex--; )
    7139  {
    7140  m_Pools[poolIndex]->GetBlockVector().DestroyDefragmentator();
    7141  }
    7142 
    7143  // Process standard memory.
    7144  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
    7145  {
    7146  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    7147  {
    7148  for(size_t blockVectorType = VMA_BLOCK_VECTOR_TYPE_COUNT; blockVectorType--; )
    7149  {
    7150  m_pBlockVectors[memTypeIndex][blockVectorType]->DestroyDefragmentator();
    7151  }
    7152  }
    7153  }
    7154 
    7155  return result;
    7156 }
    7157 
// Fills *pAllocationInfo with the allocation's current state. For allocations
// that can become lost, this also "touches" the allocation: its last-use frame
// index is advanced to the current frame via a CAS loop.
void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
{
    if(hAllocation->CanBecomeLost())
    {
        /*
        Warning: This is a carefully designed algorithm.
        Do not modify unless you really know what you're doing :)
        */
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                // Allocation is lost: report no backing memory, but keep size
                // and user data, which remain meaningful.
                pAllocationInfo->memoryType = UINT32_MAX;
                pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
                pAllocationInfo->offset = 0;
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                // Already touched this frame: report full, live info.
                pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
                pAllocationInfo->deviceMemory = hAllocation->GetMemory();
                pAllocationInfo->offset = hAllocation->GetOffset();
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = hAllocation->GetMappedData();
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else // Last use time earlier than current time.
            {
                // Try to advance the last-use frame; on CAS failure another
                // thread changed it - loop re-reads and re-dispatches.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    // We could use the same code here, but for performance reasons we don't need to use the hAllocation.LastUseFrameIndex atomic.
    else
    {
        pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
        pAllocationInfo->deviceMemory = hAllocation->GetMemory();
        pAllocationInfo->offset = hAllocation->GetOffset();
        pAllocationInfo->size = hAllocation->GetSize();
        pAllocationInfo->pMappedData = hAllocation->GetMappedData();
        pAllocationInfo->pUserData = hAllocation->GetUserData();
    }
}
    7210 
    7211 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
    7212 {
    7213  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u", pCreateInfo->memoryTypeIndex);
    7214 
    7215  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
    7216 
    7217  if(newCreateInfo.maxBlockCount == 0)
    7218  {
    7219  newCreateInfo.maxBlockCount = SIZE_MAX;
    7220  }
    7221  if(newCreateInfo.blockSize == 0)
    7222  {
    7223  newCreateInfo.blockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
    7224  }
    7225 
    7226  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo);
    7227 
    7228  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
    7229  if(res != VK_SUCCESS)
    7230  {
    7231  vma_delete(this, *pPool);
    7232  *pPool = VMA_NULL;
    7233  return res;
    7234  }
    7235 
    7236  // Add to m_Pools.
    7237  {
    7238  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    7239  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
    7240  }
    7241 
    7242  return VK_SUCCESS;
    7243 }
    7244 
    7245 void VmaAllocator_T::DestroyPool(VmaPool pool)
    7246 {
    7247  // Remove from m_Pools.
    7248  {
    7249  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    7250  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
    7251  VMA_ASSERT(success && "Pool not found in Allocator.");
    7252  }
    7253 
    7254  vma_delete(this, pool);
    7255 }
    7256 
    7257 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
    7258 {
    7259  pool->m_BlockVector.GetPoolStats(pPoolStats);
    7260 }
    7261 
// Atomically publishes the new frame index. The lost-allocation machinery
// (see GetAllocationInfo and MakePoolAllocationsLost) compares allocations'
// last-use frame against this value.
void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
{
    m_CurrentFrameIndex.store(frameIndex);
}
    7266 
    7267 void VmaAllocator_T::MakePoolAllocationsLost(
    7268  VmaPool hPool,
    7269  size_t* pLostAllocationCount)
    7270 {
    7271  hPool->m_BlockVector.MakePoolAllocationsLost(
    7272  m_CurrentFrameIndex.load(),
    7273  pLostAllocationCount);
    7274 }
    7275 
    7276 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
    7277 {
    7278  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST);
    7279  (*pAllocation)->InitLost();
    7280 }
    7281 
    7282 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
    7283 {
    7284  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
    7285 
    7286  VkResult res;
    7287  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    7288  {
    7289  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    7290  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
    7291  {
    7292  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    7293  if(res == VK_SUCCESS)
    7294  {
    7295  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
    7296  }
    7297  }
    7298  else
    7299  {
    7300  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
    7301  }
    7302  }
    7303  else
    7304  {
    7305  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    7306  }
    7307 
    7308  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
    7309  {
    7310  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
    7311  }
    7312 
    7313  return res;
    7314 }
    7315 
    7316 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
    7317 {
    7318  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
    7319  {
    7320  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
    7321  }
    7322 
    7323  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
    7324 
    7325  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
    7326  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    7327  {
    7328  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    7329  m_HeapSizeLimit[heapIndex] += size;
    7330  }
    7331 }
    7332 
    7333 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
    7334 {
    7335  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
    7336 
    7337  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    7338  {
    7339  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    7340  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex][allocation->GetBlockVectorType()];
    7341  VMA_ASSERT(pDedicatedAllocations);
    7342  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
    7343  VMA_ASSERT(success);
    7344  }
    7345 
    7346  VkDeviceMemory hMemory = allocation->GetMemory();
    7347 
    7348  if(allocation->GetMappedData() != VMA_NULL)
    7349  {
    7350  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    7351  }
    7352 
    7353  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
    7354 
    7355  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
    7356 }
    7357 
    7358 #if VMA_STATS_STRING_ENABLED
    7359 
// Writes a detailed JSON map of all allocations into `json`, as three
// optional sections (each emitted only when non-empty):
//   "DedicatedAllocations" - per memory type / block vector type,
//   "DefaultPools"         - the allocator's default block vectors,
//   "Pools"                - user-created custom pools, as a flat array.
void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
{
    bool dedicatedAllocationsStarted = false;
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
        {
            AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex][blockVectorType];
            VMA_ASSERT(pDedicatedAllocVector);
            if(pDedicatedAllocVector->empty() == false)
            {
                // Open the "DedicatedAllocations" object lazily, on the first
                // non-empty vector, so an empty section is never emitted.
                if(dedicatedAllocationsStarted == false)
                {
                    dedicatedAllocationsStarted = true;
                    json.WriteString("DedicatedAllocations");
                    json.BeginObject();
                }

                // Key looks like "Type 3" or "Type 3 Mapped".
                json.BeginString("Type ");
                json.ContinueString(memTypeIndex);
                if(blockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED)
                {
                    json.ContinueString(" Mapped");
                }
                json.EndString();

                json.BeginArray();

                for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
                {
                    const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
                    json.BeginObject(true);

                    json.WriteString("Size");
                    json.WriteNumber(hAlloc->GetSize());

                    json.WriteString("Type");
                    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[hAlloc->GetSuballocationType()]);

                    json.EndObject();
                }

                json.EndArray();
            }
        }
    }
    if(dedicatedAllocationsStarted)
    {
        json.EndObject();
    }

    {
        bool allocationsStarted = false;
        for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
        {
            for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
            {
                if(m_pBlockVectors[memTypeIndex][blockVectorType]->IsEmpty() == false)
                {
                    // Same lazy-open pattern for "DefaultPools".
                    if(allocationsStarted == false)
                    {
                        allocationsStarted = true;
                        json.WriteString("DefaultPools");
                        json.BeginObject();
                    }

                    json.BeginString("Type ");
                    json.ContinueString(memTypeIndex);
                    if(blockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED)
                    {
                        json.ContinueString(" Mapped");
                    }
                    json.EndString();

                    m_pBlockVectors[memTypeIndex][blockVectorType]->PrintDetailedMap(json);
                }
            }
        }
        if(allocationsStarted)
        {
            json.EndObject();
        }
    }

    // Custom pools (under the pools mutex).
    {
        VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
        const size_t poolCount = m_Pools.size();
        if(poolCount > 0)
        {
            json.WriteString("Pools");
            json.BeginArray();
            for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
            {
                m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
            }
            json.EndArray();
        }
    }
}
    7460 
    7461 #endif // #if VMA_STATS_STRING_ENABLED
    7462 
    7463 static VkResult AllocateMemoryForImage(
    7464  VmaAllocator allocator,
    7465  VkImage image,
    7466  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    7467  VmaSuballocationType suballocType,
    7468  VmaAllocation* pAllocation)
    7469 {
    7470  VMA_ASSERT(allocator && (image != VK_NULL_HANDLE) && pAllocationCreateInfo && pAllocation);
    7471 
    7472  VkMemoryRequirements vkMemReq = {};
    7473  bool requiresDedicatedAllocation = false;
    7474  bool prefersDedicatedAllocation = false;
    7475  allocator->GetImageMemoryRequirements(image, vkMemReq,
    7476  requiresDedicatedAllocation, prefersDedicatedAllocation);
    7477 
    7478  return allocator->AllocateMemory(
    7479  vkMemReq,
    7480  requiresDedicatedAllocation,
    7481  prefersDedicatedAllocation,
    7482  VK_NULL_HANDLE, // dedicatedBuffer
    7483  image, // dedicatedImage
    7484  *pAllocationCreateInfo,
    7485  suballocType,
    7486  pAllocation);
    7487 }
    7488 
    7490 // Public interface
    7491 
    7492 VkResult vmaCreateAllocator(
    7493  const VmaAllocatorCreateInfo* pCreateInfo,
    7494  VmaAllocator* pAllocator)
    7495 {
    7496  VMA_ASSERT(pCreateInfo && pAllocator);
    7497  VMA_DEBUG_LOG("vmaCreateAllocator");
    7498  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
    7499  return VK_SUCCESS;
    7500 }
    7501 
    7502 void vmaDestroyAllocator(
    7503  VmaAllocator allocator)
    7504 {
    7505  if(allocator != VK_NULL_HANDLE)
    7506  {
    7507  VMA_DEBUG_LOG("vmaDestroyAllocator");
    7508  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
    7509  vma_delete(&allocationCallbacks, allocator);
    7510  }
    7511 }
    7512 
    7514  VmaAllocator allocator,
    7515  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
    7516 {
    7517  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
    7518  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
    7519 }
    7520 
    7522  VmaAllocator allocator,
    7523  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
    7524 {
    7525  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
    7526  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
    7527 }
    7528 
    7530  VmaAllocator allocator,
    7531  uint32_t memoryTypeIndex,
    7532  VkMemoryPropertyFlags* pFlags)
    7533 {
    7534  VMA_ASSERT(allocator && pFlags);
    7535  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
    7536  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
    7537 }
    7538 
    7540  VmaAllocator allocator,
    7541  uint32_t frameIndex)
    7542 {
    7543  VMA_ASSERT(allocator);
    7544  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
    7545 
    7546  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7547 
    7548  allocator->SetCurrentFrameIndex(frameIndex);
    7549 }
    7550 
    7551 void vmaCalculateStats(
    7552  VmaAllocator allocator,
    7553  VmaStats* pStats)
    7554 {
    7555  VMA_ASSERT(allocator && pStats);
    7556  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7557  allocator->CalculateStats(pStats);
    7558 }
    7559 
    7560 #if VMA_STATS_STRING_ENABLED
    7561 
// Builds a JSON string describing the allocator's current statistics:
// a "Total" section, then one object per memory heap with its memory types
// nested inside. With detailedMap == VK_TRUE, also embeds the full allocation
// map (see VmaAllocator_T::PrintDetailedMap). The returned string is
// allocated with the allocator's CPU callbacks and must be released with
// vmaFreeStatsString().
void vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap)
{
    VMA_ASSERT(allocator && ppStatsString);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VmaStringBuilder sb(allocator);
    {
        VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
        json.BeginObject();

        VmaStats stats;
        allocator->CalculateStats(&stats);

        json.WriteString("Total");
        VmaPrintStatInfo(json, stats.total);

        // One object per heap, keyed "Heap N".
        for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
        {
            json.BeginString("Heap ");
            json.ContinueString(heapIndex);
            json.EndString();
            json.BeginObject();

            json.WriteString("Size");
            json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);

            json.WriteString("Flags");
            json.BeginArray(true);
            if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
            {
                json.WriteString("DEVICE_LOCAL");
            }
            json.EndArray();

            // Per-heap stats only when the heap actually holds blocks.
            if(stats.memoryHeap[heapIndex].blockCount > 0)
            {
                json.WriteString("Stats");
                VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
            }

            // Nest each memory type under its owning heap, keyed "Type N".
            for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
            {
                if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
                {
                    json.BeginString("Type ");
                    json.ContinueString(typeIndex);
                    json.EndString();

                    json.BeginObject();

                    // Spell out the type's property flags by name.
                    json.WriteString("Flags");
                    json.BeginArray(true);
                    VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
                    if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
                    {
                        json.WriteString("DEVICE_LOCAL");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
                    {
                        json.WriteString("HOST_VISIBLE");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
                    {
                        json.WriteString("HOST_COHERENT");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
                    {
                        json.WriteString("HOST_CACHED");
                    }
                    if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
                    {
                        json.WriteString("LAZILY_ALLOCATED");
                    }
                    json.EndArray();

                    if(stats.memoryType[typeIndex].blockCount > 0)
                    {
                        json.WriteString("Stats");
                        VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
                    }

                    json.EndObject();
                }
            }

            json.EndObject();
        }
        if(detailedMap == VK_TRUE)
        {
            allocator->PrintDetailedMap(json);
        }

        json.EndObject();
    }

    // Copy the builder's contents into a NUL-terminated string owned by the
    // caller; freed via vmaFreeStatsString (length recomputed there by strlen).
    const size_t len = sb.GetLength();
    char* const pChars = vma_new_array(allocator, char, len + 1);
    if(len > 0)
    {
        memcpy(pChars, sb.GetData(), len);
    }
    pChars[len] = '\0';
    *ppStatsString = pChars;
}
    7669 
    7670 void vmaFreeStatsString(
    7671  VmaAllocator allocator,
    7672  char* pStatsString)
    7673 {
    7674  if(pStatsString != VMA_NULL)
    7675  {
    7676  VMA_ASSERT(allocator);
    7677  size_t len = strlen(pStatsString);
    7678  vma_delete_array(allocator, pStatsString, len + 1);
    7679  }
    7680 }
    7681 
    7682 #endif // #if VMA_STATS_STRING_ENABLED
    7683 
    7686 VkResult vmaFindMemoryTypeIndex(
    7687  VmaAllocator allocator,
    7688  uint32_t memoryTypeBits,
    7689  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    7690  uint32_t* pMemoryTypeIndex)
    7691 {
    7692  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    7693  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    7694  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    7695 
    7696  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
    7697  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
    7698  if(preferredFlags == 0)
    7699  {
    7700  preferredFlags = requiredFlags;
    7701  }
    7702  // preferredFlags, if not 0, must be a superset of requiredFlags.
    7703  VMA_ASSERT((requiredFlags & ~preferredFlags) == 0);
    7704 
    7705  // Convert usage to requiredFlags and preferredFlags.
    7706  switch(pAllocationCreateInfo->usage)
    7707  {
    7709  break;
    7711  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    7712  break;
    7714  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    7715  break;
    7717  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    7718  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    7719  break;
    7721  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    7722  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
    7723  break;
    7724  default:
    7725  break;
    7726  }
    7727 
    7728  *pMemoryTypeIndex = UINT32_MAX;
    7729  uint32_t minCost = UINT32_MAX;
    7730  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
    7731  memTypeIndex < allocator->GetMemoryTypeCount();
    7732  ++memTypeIndex, memTypeBit <<= 1)
    7733  {
    7734  // This memory type is acceptable according to memoryTypeBits bitmask.
    7735  if((memTypeBit & memoryTypeBits) != 0)
    7736  {
    7737  const VkMemoryPropertyFlags currFlags =
    7738  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
    7739  // This memory type contains requiredFlags.
    7740  if((requiredFlags & ~currFlags) == 0)
    7741  {
    7742  // Calculate cost as number of bits from preferredFlags not present in this memory type.
    7743  uint32_t currCost = CountBitsSet(preferredFlags & ~currFlags);
    7744  // Remember memory type with lowest cost.
    7745  if(currCost < minCost)
    7746  {
    7747  *pMemoryTypeIndex = memTypeIndex;
    7748  if(currCost == 0)
    7749  {
    7750  return VK_SUCCESS;
    7751  }
    7752  minCost = currCost;
    7753  }
    7754  }
    7755  }
    7756  }
    7757  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
    7758 }
    7759 
    7760 VkResult vmaCreatePool(
    7761  VmaAllocator allocator,
    7762  const VmaPoolCreateInfo* pCreateInfo,
    7763  VmaPool* pPool)
    7764 {
    7765  VMA_ASSERT(allocator && pCreateInfo && pPool);
    7766 
    7767  VMA_DEBUG_LOG("vmaCreatePool");
    7768 
    7769  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7770 
    7771  return allocator->CreatePool(pCreateInfo, pPool);
    7772 }
    7773 
    7774 void vmaDestroyPool(
    7775  VmaAllocator allocator,
    7776  VmaPool pool)
    7777 {
    7778  VMA_ASSERT(allocator && pool);
    7779 
    7780  VMA_DEBUG_LOG("vmaDestroyPool");
    7781 
    7782  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7783 
    7784  allocator->DestroyPool(pool);
    7785 }
    7786 
    7787 void vmaGetPoolStats(
    7788  VmaAllocator allocator,
    7789  VmaPool pool,
    7790  VmaPoolStats* pPoolStats)
    7791 {
    7792  VMA_ASSERT(allocator && pool && pPoolStats);
    7793 
    7794  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7795 
    7796  allocator->GetPoolStats(pool, pPoolStats);
    7797 }
    7798 
    7800  VmaAllocator allocator,
    7801  VmaPool pool,
    7802  size_t* pLostAllocationCount)
    7803 {
    7804  VMA_ASSERT(allocator && pool);
    7805 
    7806  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7807 
    7808  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
    7809 }
    7810 
    7811 VkResult vmaAllocateMemory(
    7812  VmaAllocator allocator,
    7813  const VkMemoryRequirements* pVkMemoryRequirements,
    7814  const VmaAllocationCreateInfo* pCreateInfo,
    7815  VmaAllocation* pAllocation,
    7816  VmaAllocationInfo* pAllocationInfo)
    7817 {
    7818  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
    7819 
    7820  VMA_DEBUG_LOG("vmaAllocateMemory");
    7821 
    7822  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7823 
    7824  VkResult result = allocator->AllocateMemory(
    7825  *pVkMemoryRequirements,
    7826  false, // requiresDedicatedAllocation
    7827  false, // prefersDedicatedAllocation
    7828  VK_NULL_HANDLE, // dedicatedBuffer
    7829  VK_NULL_HANDLE, // dedicatedImage
    7830  *pCreateInfo,
    7831  VMA_SUBALLOCATION_TYPE_UNKNOWN,
    7832  pAllocation);
    7833 
    7834  if(pAllocationInfo && result == VK_SUCCESS)
    7835  {
    7836  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    7837  }
    7838 
    7839  return result;
    7840 }
    7841 
    7843  VmaAllocator allocator,
    7844  VkBuffer buffer,
    7845  const VmaAllocationCreateInfo* pCreateInfo,
    7846  VmaAllocation* pAllocation,
    7847  VmaAllocationInfo* pAllocationInfo)
    7848 {
    7849  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    7850 
    7851  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
    7852 
    7853  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7854 
    7855  VkMemoryRequirements vkMemReq = {};
    7856  bool requiresDedicatedAllocation = false;
    7857  bool prefersDedicatedAllocation = false;
    7858  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
    7859  requiresDedicatedAllocation,
    7860  prefersDedicatedAllocation);
    7861 
    7862  VkResult result = allocator->AllocateMemory(
    7863  vkMemReq,
    7864  requiresDedicatedAllocation,
    7865  prefersDedicatedAllocation,
    7866  buffer, // dedicatedBuffer
    7867  VK_NULL_HANDLE, // dedicatedImage
    7868  *pCreateInfo,
    7869  VMA_SUBALLOCATION_TYPE_BUFFER,
    7870  pAllocation);
    7871 
    7872  if(pAllocationInfo && result == VK_SUCCESS)
    7873  {
    7874  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    7875  }
    7876 
    7877  return result;
    7878 }
    7879 
    7880 VkResult vmaAllocateMemoryForImage(
    7881  VmaAllocator allocator,
    7882  VkImage image,
    7883  const VmaAllocationCreateInfo* pCreateInfo,
    7884  VmaAllocation* pAllocation,
    7885  VmaAllocationInfo* pAllocationInfo)
    7886 {
    7887  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    7888 
    7889  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
    7890 
    7891  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7892 
    7893  VkResult result = AllocateMemoryForImage(
    7894  allocator,
    7895  image,
    7896  pCreateInfo,
    7897  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
    7898  pAllocation);
    7899 
    7900  if(pAllocationInfo && result == VK_SUCCESS)
    7901  {
    7902  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    7903  }
    7904 
    7905  return result;
    7906 }
    7907 
    7908 void vmaFreeMemory(
    7909  VmaAllocator allocator,
    7910  VmaAllocation allocation)
    7911 {
    7912  VMA_ASSERT(allocator && allocation);
    7913 
    7914  VMA_DEBUG_LOG("vmaFreeMemory");
    7915 
    7916  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7917 
    7918  allocator->FreeMemory(allocation);
    7919 }
    7920 
    7922  VmaAllocator allocator,
    7923  VmaAllocation allocation,
    7924  VmaAllocationInfo* pAllocationInfo)
    7925 {
    7926  VMA_ASSERT(allocator && allocation && pAllocationInfo);
    7927 
    7928  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7929 
    7930  allocator->GetAllocationInfo(allocation, pAllocationInfo);
    7931 }
    7932 
    7934  VmaAllocator allocator,
    7935  VmaAllocation allocation,
    7936  void* pUserData)
    7937 {
    7938  VMA_ASSERT(allocator && allocation);
    7939 
    7940  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7941 
    7942  allocation->SetUserData(pUserData);
    7943 }
    7944 
    7946  VmaAllocator allocator,
    7947  VmaAllocation* pAllocation)
    7948 {
    7949  VMA_ASSERT(allocator && pAllocation);
    7950 
    7951  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
    7952 
    7953  allocator->CreateLostAllocation(pAllocation);
    7954 }
    7955 
    7956 VkResult vmaMapMemory(
    7957  VmaAllocator allocator,
    7958  VmaAllocation allocation,
    7959  void** ppData)
    7960 {
    7961  VMA_ASSERT(allocator && allocation && ppData);
    7962 
    7963  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7964 
    7965  return (*allocator->GetVulkanFunctions().vkMapMemory)(
    7966  allocator->m_hDevice,
    7967  allocation->GetMemory(),
    7968  allocation->GetOffset(),
    7969  allocation->GetSize(),
    7970  0,
    7971  ppData);
    7972 }
    7973 
    7974 void vmaUnmapMemory(
    7975  VmaAllocator allocator,
    7976  VmaAllocation allocation)
    7977 {
    7978  VMA_ASSERT(allocator && allocation);
    7979 
    7980  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7981 
    7982  (*allocator->GetVulkanFunctions().vkUnmapMemory)(allocator->m_hDevice, allocation->GetMemory());
    7983 }
    7984 
    7985 void vmaUnmapPersistentlyMappedMemory(VmaAllocator allocator)
    7986 {
    7987  VMA_ASSERT(allocator);
    7988 
    7989  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7990 
    7991  allocator->UnmapPersistentlyMappedMemory();
    7992 }
    7993 
    7994 VkResult vmaMapPersistentlyMappedMemory(VmaAllocator allocator)
    7995 {
    7996  VMA_ASSERT(allocator);
    7997 
    7998  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7999 
    8000  return allocator->MapPersistentlyMappedMemory();
    8001 }
    8002 
    8003 VkResult vmaDefragment(
    8004  VmaAllocator allocator,
    8005  VmaAllocation* pAllocations,
    8006  size_t allocationCount,
    8007  VkBool32* pAllocationsChanged,
    8008  const VmaDefragmentationInfo *pDefragmentationInfo,
    8009  VmaDefragmentationStats* pDefragmentationStats)
    8010 {
    8011  VMA_ASSERT(allocator && pAllocations);
    8012 
    8013  VMA_DEBUG_LOG("vmaDefragment");
    8014 
    8015  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8016 
    8017  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
    8018 }
    8019 
    8020 VkResult vmaCreateBuffer(
    8021  VmaAllocator allocator,
    8022  const VkBufferCreateInfo* pBufferCreateInfo,
    8023  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    8024  VkBuffer* pBuffer,
    8025  VmaAllocation* pAllocation,
    8026  VmaAllocationInfo* pAllocationInfo)
    8027 {
    8028  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
    8029 
    8030  VMA_DEBUG_LOG("vmaCreateBuffer");
    8031 
    8032  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8033 
    8034  *pBuffer = VK_NULL_HANDLE;
    8035  *pAllocation = VK_NULL_HANDLE;
    8036 
    8037  // 1. Create VkBuffer.
    8038  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
    8039  allocator->m_hDevice,
    8040  pBufferCreateInfo,
    8041  allocator->GetAllocationCallbacks(),
    8042  pBuffer);
    8043  if(res >= 0)
    8044  {
    8045  // 2. vkGetBufferMemoryRequirements.
    8046  VkMemoryRequirements vkMemReq = {};
    8047  bool requiresDedicatedAllocation = false;
    8048  bool prefersDedicatedAllocation = false;
    8049  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
    8050  requiresDedicatedAllocation, prefersDedicatedAllocation);
    8051 
    8052  // 3. Allocate memory using allocator.
    8053  res = allocator->AllocateMemory(
    8054  vkMemReq,
    8055  requiresDedicatedAllocation,
    8056  prefersDedicatedAllocation,
    8057  *pBuffer, // dedicatedBuffer
    8058  VK_NULL_HANDLE, // dedicatedImage
    8059  *pAllocationCreateInfo,
    8060  VMA_SUBALLOCATION_TYPE_BUFFER,
    8061  pAllocation);
    8062  if(res >= 0)
    8063  {
    8064  // 3. Bind buffer with memory.
    8065  res = (*allocator->GetVulkanFunctions().vkBindBufferMemory)(
    8066  allocator->m_hDevice,
    8067  *pBuffer,
    8068  (*pAllocation)->GetMemory(),
    8069  (*pAllocation)->GetOffset());
    8070  if(res >= 0)
    8071  {
    8072  // All steps succeeded.
    8073  if(pAllocationInfo != VMA_NULL)
    8074  {
    8075  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    8076  }
    8077  return VK_SUCCESS;
    8078  }
    8079  allocator->FreeMemory(*pAllocation);
    8080  *pAllocation = VK_NULL_HANDLE;
    8081  return res;
    8082  }
    8083  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    8084  *pBuffer = VK_NULL_HANDLE;
    8085  return res;
    8086  }
    8087  return res;
    8088 }
    8089 
    8090 void vmaDestroyBuffer(
    8091  VmaAllocator allocator,
    8092  VkBuffer buffer,
    8093  VmaAllocation allocation)
    8094 {
    8095  if(buffer != VK_NULL_HANDLE)
    8096  {
    8097  VMA_ASSERT(allocator);
    8098 
    8099  VMA_DEBUG_LOG("vmaDestroyBuffer");
    8100 
    8101  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8102 
    8103  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
    8104 
    8105  allocator->FreeMemory(allocation);
    8106  }
    8107 }
    8108 
    8109 VkResult vmaCreateImage(
    8110  VmaAllocator allocator,
    8111  const VkImageCreateInfo* pImageCreateInfo,
    8112  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    8113  VkImage* pImage,
    8114  VmaAllocation* pAllocation,
    8115  VmaAllocationInfo* pAllocationInfo)
    8116 {
    8117  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
    8118 
    8119  VMA_DEBUG_LOG("vmaCreateImage");
    8120 
    8121  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8122 
    8123  *pImage = VK_NULL_HANDLE;
    8124  *pAllocation = VK_NULL_HANDLE;
    8125 
    8126  // 1. Create VkImage.
    8127  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
    8128  allocator->m_hDevice,
    8129  pImageCreateInfo,
    8130  allocator->GetAllocationCallbacks(),
    8131  pImage);
    8132  if(res >= 0)
    8133  {
    8134  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
    8135  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
    8136  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
    8137 
    8138  // 2. Allocate memory using allocator.
    8139  res = AllocateMemoryForImage(allocator, *pImage, pAllocationCreateInfo, suballocType, pAllocation);
    8140  if(res >= 0)
    8141  {
    8142  // 3. Bind image with memory.
    8143  res = (*allocator->GetVulkanFunctions().vkBindImageMemory)(
    8144  allocator->m_hDevice,
    8145  *pImage,
    8146  (*pAllocation)->GetMemory(),
    8147  (*pAllocation)->GetOffset());
    8148  if(res >= 0)
    8149  {
    8150  // All steps succeeded.
    8151  if(pAllocationInfo != VMA_NULL)
    8152  {
    8153  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    8154  }
    8155  return VK_SUCCESS;
    8156  }
    8157  allocator->FreeMemory(*pAllocation);
    8158  *pAllocation = VK_NULL_HANDLE;
    8159  return res;
    8160  }
    8161  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    8162  *pImage = VK_NULL_HANDLE;
    8163  return res;
    8164  }
    8165  return res;
    8166 }
    8167 
    8168 void vmaDestroyImage(
    8169  VmaAllocator allocator,
    8170  VkImage image,
    8171  VmaAllocation allocation)
    8172 {
    8173  if(image != VK_NULL_HANDLE)
    8174  {
    8175  VMA_ASSERT(allocator);
    8176 
    8177  VMA_DEBUG_LOG("vmaDestroyImage");
    8178 
    8179  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8180 
    8181  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
    8182 
    8183  allocator->FreeMemory(allocation);
    8184  }
    8185 }
    8186 
    8187 #endif // #ifdef VMA_IMPLEMENTATION
    PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
    Definition: vk_mem_alloc.h:568
    Set this flag if the allocation should have its own memory block.
    Definition: vk_mem_alloc.h:785
    void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
    VkPhysicalDevice physicalDevice
    Vulkan physical device.
    Definition: vk_mem_alloc.h:593
    diff --git a/src/vk_mem_alloc.h b/src/vk_mem_alloc.h index 58091c2..0e92290 100644 --- a/src/vk_mem_alloc.h +++ b/src/vk_mem_alloc.h @@ -1305,7 +1305,15 @@ returned value is negative error code, *pBuffer and *pAllocation are null. If the function succeeded, you must destroy both buffer and allocation when you no longer need them using either convenience function vmaDestroyBuffer() or -separately, using vkDestroyBuffer() and vmaFreeMemory(). +separately, using `vkDestroyBuffer()` and vmaFreeMemory(). + +If VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT flag was used, +VK_KHR_dedicated_allocation extension is used internally to query driver whether +it requires or prefers the new buffer to have dedicated allocation. If yes, +and if dedicated allocation is possible (VmaAllocationCreateInfo::pool is null +and VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT is not used), it creates dedicated +allocation for this buffer, just like when using +VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT. */ VkResult vmaCreateBuffer( VmaAllocator allocator, @@ -1323,6 +1331,8 @@ This is just a convenience function equivalent to: vkDestroyBuffer(device, buffer, allocationCallbacks); vmaFreeMemory(allocator, allocation); \endcode + +It it safe to pass null as buffer and/or allocation. */ void vmaDestroyBuffer( VmaAllocator allocator, @@ -1346,6 +1356,8 @@ This is just a convenience function equivalent to: vkDestroyImage(device, image, allocationCallbacks); vmaFreeMemory(allocator, allocation); \endcode + +It it safe to pass null as image and/or allocation. */ void vmaDestroyImage( VmaAllocator allocator,